File: tools/clang/lib/CodeGen/TargetInfo.cpp
Warning: line 3075, column 31: Called C++ object pointer is null
1 | //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // These classes wrap the information about a call or function | ||||
10 | // definition used to handle ABI compliance. | ||||
11 | // | ||||
12 | //===----------------------------------------------------------------------===// | ||||
13 | |||||
14 | #include "TargetInfo.h" | ||||
15 | #include "ABIInfo.h" | ||||
16 | #include "CGBlocks.h" | ||||
17 | #include "CGCXXABI.h" | ||||
18 | #include "CGValue.h" | ||||
19 | #include "CodeGenFunction.h" | ||||
20 | #include "clang/AST/RecordLayout.h" | ||||
21 | #include "clang/Basic/CodeGenOptions.h" | ||||
22 | #include "clang/CodeGen/CGFunctionInfo.h" | ||||
23 | #include "clang/CodeGen/SwiftCallingConv.h" | ||||
24 | #include "llvm/ADT/StringExtras.h" | ||||
25 | #include "llvm/ADT/StringSwitch.h" | ||||
26 | #include "llvm/ADT/Triple.h" | ||||
27 | #include "llvm/ADT/Twine.h" | ||||
28 | #include "llvm/IR/DataLayout.h" | ||||
29 | #include "llvm/IR/Type.h" | ||||
30 | #include "llvm/Support/raw_ostream.h" | ||||
31 | #include <algorithm> // std::sort | ||||
32 | |||||
33 | using namespace clang; | ||||
34 | using namespace CodeGen; | ||||
35 | |||||
36 | // Helper for coercing an aggregate argument or return value into an integer | ||||
37 | // array of the same size (including padding) and alignment. This alternate | ||||
38 | // coercion happens only for the RenderScript ABI and can be removed after | ||||
39 | // runtimes that rely on it are no longer supported. | ||||
40 | // | ||||
41 | // RenderScript assumes that the size of the argument / return value in the IR | ||||
42 | // is the same as the size of the corresponding qualified type. This helper | ||||
43 | // coerces the aggregate type into an array of the same size (including | ||||
44 | // padding). This coercion is used in lieu of expansion of struct members or | ||||
45 | // other canonical coercions that return a coerced-type of larger size. | ||||
46 | // | ||||
47 | // Ty - The argument / return value type | ||||
48 | // Context - The associated ASTContext | ||||
49 | // LLVMContext - The associated LLVMContext | ||||
50 | static ABIArgInfo coerceToIntArray(QualType Ty, | ||||
51 | ASTContext &Context, | ||||
52 | llvm::LLVMContext &LLVMContext) { | ||||
53 | // Alignment and Size are measured in bits. | ||||
54 | const uint64_t Size = Context.getTypeSize(Ty); | ||||
55 | const uint64_t Alignment = Context.getTypeAlign(Ty); | ||||
56 | llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment); | ||||
57 | const uint64_t NumElements = (Size + Alignment - 1) / Alignment; | ||||
58 | return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); | ||||
59 | } | ||||
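// Editor's sketch (not part of the original file): a worked example of the
// coercion above. For a hypothetical `struct S { char c; int i; };` on a
// typical 32-bit target, getTypeSize(S) is 64 bits and getTypeAlign(S) is
// 32 bits, so IntType is i32 and NumElements = (64 + 32 - 1) / 32 = 2,
// giving the coerced type [2 x i32] -- the same 64-bit footprint as S,
// padding included.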
60 | |||||
61 | static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, | ||||
62 | llvm::Value *Array, | ||||
63 | llvm::Value *Value, | ||||
64 | unsigned FirstIndex, | ||||
65 | unsigned LastIndex) { | ||||
66 | // Alternatively, we could emit this as a loop in the source. | ||||
67 | for (unsigned I = FirstIndex; I <= LastIndex; ++I) { | ||||
68 | llvm::Value *Cell = | ||||
69 | Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); | ||||
70 | Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); | ||||
71 | } | ||||
72 | } | ||||
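// Editor's note (hedged usage sketch, not original source): this helper is
// used by the initDwarfEHRegSizeTable overrides later in this file, along
// the lines of
//   AssignToArrayRange(Builder, Address, Builder.getInt8(4), 0, 8);
// to record that DWARF registers 0..8 each occupy 4 bytes.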
73 | |||||
74 | static bool isAggregateTypeForABI(QualType T) { | ||||
75 | return !CodeGenFunction::hasScalarEvaluationKind(T) || | ||||
76 | T->isMemberFunctionPointerType(); | ||||
77 | } | ||||
78 | |||||
79 | ABIArgInfo | ||||
80 | ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign, | ||||
81 | llvm::Type *Padding) const { | ||||
82 | return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), | ||||
83 | ByRef, Realign, Padding); | ||||
84 | } | ||||
85 | |||||
86 | ABIArgInfo | ||||
87 | ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { | ||||
88 | return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), | ||||
89 | /*ByRef*/ false, Realign); | ||||
90 | } | ||||
91 | |||||
92 | Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
93 | QualType Ty) const { | ||||
94 | return Address::invalid(); | ||||
95 | } | ||||
96 | |||||
97 | ABIInfo::~ABIInfo() {} | ||||
98 | |||||
99 | /// Does the given lowering require more than the given number of | ||||
100 | /// registers when expanded? | ||||
101 | /// | ||||
102 | /// This is intended to be the basis of a reasonable basic implementation | ||||
103 | /// of should{Pass,Return}IndirectlyForSwift. | ||||
104 | /// | ||||
105 | /// For most targets, a limit of four total registers is reasonable; this | ||||
106 | /// limits the amount of code required in order to move around the value | ||||
107 | /// in case it wasn't produced immediately prior to the call by the caller | ||||
108 | /// (or wasn't produced in exactly the right registers) or isn't used | ||||
109 | /// immediately within the callee. But some targets may need to further | ||||
110 | /// limit the register count due to an inability to support that many | ||||
111 | /// return registers. | ||||
112 | static bool occupiesMoreThan(CodeGenTypes &cgt, | ||||
113 | ArrayRef<llvm::Type*> scalarTypes, | ||||
114 | unsigned maxAllRegisters) { | ||||
115 | unsigned intCount = 0, fpCount = 0; | ||||
116 | for (llvm::Type *type : scalarTypes) { | ||||
117 | if (type->isPointerTy()) { | ||||
118 | intCount++; | ||||
119 | } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { | ||||
120 | auto ptrWidth = cgt.getTarget().getPointerWidth(0); | ||||
121 | intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; | ||||
122 | } else { | ||||
123 | assert(type->isVectorTy() || type->isFloatingPointTy()); | ||||
124 | fpCount++; | ||||
125 | } | ||||
126 | } | ||||
127 | |||||
128 | return (intCount + fpCount > maxAllRegisters); | ||||
129 | } | ||||
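// Editor's worked example (assuming a 64-bit pointer width, not original
// source): for scalarTypes {i8*, i128, float}, the pointer counts as one
// integer register, the i128 as (128 + 63) / 64 = 2, and the float as one
// fp register -- a total of 4, so with maxAllRegisters = 4 the function
// returns false.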
130 | |||||
131 | bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, | ||||
132 | llvm::Type *eltTy, | ||||
133 | unsigned numElts) const { | ||||
134 | // The default implementation of this assumes that the target guarantees | ||||
135 | // 128-bit SIMD support but nothing more. | ||||
136 | return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16); | ||||
137 | } | ||||
138 | |||||
139 | static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, | ||||
140 | CGCXXABI &CXXABI) { | ||||
141 | const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); | ||||
142 | if (!RD) { | ||||
143 | if (!RT->getDecl()->canPassInRegisters()) | ||||
144 | return CGCXXABI::RAA_Indirect; | ||||
145 | return CGCXXABI::RAA_Default; | ||||
146 | } | ||||
147 | return CXXABI.getRecordArgABI(RD); | ||||
148 | } | ||||
149 | |||||
150 | static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, | ||||
151 | CGCXXABI &CXXABI) { | ||||
152 | const RecordType *RT = T->getAs<RecordType>(); | ||||
153 | if (!RT) | ||||
154 | return CGCXXABI::RAA_Default; | ||||
155 | return getRecordArgABI(RT, CXXABI); | ||||
156 | } | ||||
157 | |||||
158 | static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, | ||||
159 | const ABIInfo &Info) { | ||||
160 | QualType Ty = FI.getReturnType(); | ||||
161 | |||||
162 | if (const auto *RT = Ty->getAs<RecordType>()) | ||||
163 | if (!isa<CXXRecordDecl>(RT->getDecl()) && | ||||
164 | !RT->getDecl()->canPassInRegisters()) { | ||||
165 | FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty); | ||||
166 | return true; | ||||
167 | } | ||||
168 | |||||
169 | return CXXABI.classifyReturnType(FI); | ||||
170 | } | ||||
171 | |||||
172 | /// Pass transparent unions as if they were the type of the first element. Sema | ||||
173 | /// should ensure that all elements of the union have the same "machine type". | ||||
174 | static QualType useFirstFieldIfTransparentUnion(QualType Ty) { | ||||
175 | if (const RecordType *UT = Ty->getAsUnionType()) { | ||||
176 | const RecordDecl *UD = UT->getDecl(); | ||||
177 | if (UD->hasAttr<TransparentUnionAttr>()) { | ||||
178 | assert(!UD->field_empty() && "sema created an empty transparent union"); | ||||
179 | return UD->field_begin()->getType(); | ||||
180 | } | ||||
181 | } | ||||
182 | return Ty; | ||||
183 | } | ||||
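// Editor's illustration (hypothetical C declaration, not from this file):
//   typedef union { int i; float f; } arg_t
//       __attribute__((transparent_union));
// A parameter of type arg_t is passed exactly as its first field, int.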
184 | |||||
185 | CGCXXABI &ABIInfo::getCXXABI() const { | ||||
186 | return CGT.getCXXABI(); | ||||
187 | } | ||||
188 | |||||
189 | ASTContext &ABIInfo::getContext() const { | ||||
190 | return CGT.getContext(); | ||||
191 | } | ||||
192 | |||||
193 | llvm::LLVMContext &ABIInfo::getVMContext() const { | ||||
194 | return CGT.getLLVMContext(); | ||||
195 | } | ||||
196 | |||||
197 | const llvm::DataLayout &ABIInfo::getDataLayout() const { | ||||
198 | return CGT.getDataLayout(); | ||||
199 | } | ||||
200 | |||||
201 | const TargetInfo &ABIInfo::getTarget() const { | ||||
202 | return CGT.getTarget(); | ||||
203 | } | ||||
204 | |||||
205 | const CodeGenOptions &ABIInfo::getCodeGenOpts() const { | ||||
206 | return CGT.getCodeGenOpts(); | ||||
207 | } | ||||
208 | |||||
209 | bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } | ||||
210 | |||||
211 | bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { | ||||
212 | return false; | ||||
213 | } | ||||
214 | |||||
215 | bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, | ||||
216 | uint64_t Members) const { | ||||
217 | return false; | ||||
218 | } | ||||
219 | |||||
220 | LLVM_DUMP_METHOD void ABIArgInfo::dump() const { | ||||
221 | raw_ostream &OS = llvm::errs(); | ||||
222 | OS << "(ABIArgInfo Kind="; | ||||
223 | switch (TheKind) { | ||||
224 | case Direct: | ||||
225 | OS << "Direct Type="; | ||||
226 | if (llvm::Type *Ty = getCoerceToType()) | ||||
227 | Ty->print(OS); | ||||
228 | else | ||||
229 | OS << "null"; | ||||
230 | break; | ||||
231 | case Extend: | ||||
232 | OS << "Extend"; | ||||
233 | break; | ||||
234 | case Ignore: | ||||
235 | OS << "Ignore"; | ||||
236 | break; | ||||
237 | case InAlloca: | ||||
238 | OS << "InAlloca Offset=" << getInAllocaFieldIndex(); | ||||
239 | break; | ||||
240 | case Indirect: | ||||
241 | OS << "Indirect Align=" << getIndirectAlign().getQuantity() | ||||
242 | << " ByVal=" << getIndirectByVal() | ||||
243 | << " Realign=" << getIndirectRealign(); | ||||
244 | break; | ||||
245 | case Expand: | ||||
246 | OS << "Expand"; | ||||
247 | break; | ||||
248 | case CoerceAndExpand: | ||||
249 | OS << "CoerceAndExpand Type="; | ||||
250 | getCoerceAndExpandType()->print(OS); | ||||
251 | break; | ||||
252 | } | ||||
253 | OS << ")\n"; | ||||
254 | } | ||||
255 | |||||
256 | // Dynamically round a pointer up to a multiple of the given alignment. | ||||
257 | static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF, | ||||
258 | llvm::Value *Ptr, | ||||
259 | CharUnits Align) { | ||||
260 | llvm::Value *PtrAsInt = Ptr; | ||||
261 | // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align; | ||||
262 | PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); | ||||
263 | PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, | ||||
264 | llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1)); | ||||
265 | PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, | ||||
266 | llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())); | ||||
267 | PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, | ||||
268 | Ptr->getType(), | ||||
269 | Ptr->getName() + ".aligned"); | ||||
270 | return PtrAsInt; | ||||
271 | } | ||||
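// Editor's worked example (not original source): with Align = 8 and an
// incoming pointer value of 0x1003, the IR emitted above computes
// (0x1003 + 7) & -8 = 0x1008, the next 8-byte-aligned address at or above
// the original pointer.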
272 | |||||
273 | /// Emit va_arg for a platform using the common void* representation, | ||||
274 | /// where arguments are simply emitted in an array of slots on the stack. | ||||
275 | /// | ||||
276 | /// This version implements the core direct-value passing rules. | ||||
277 | /// | ||||
278 | /// \param SlotSize - The size and alignment of a stack slot. | ||||
279 | /// Each argument will be allocated to a multiple of this number of | ||||
280 | /// slots, and all the slots will be aligned to this value. | ||||
281 | /// \param AllowHigherAlign - The slot alignment is not a cap; | ||||
282 | /// an argument type with an alignment greater than the slot size | ||||
283 | /// will be emitted on a higher-alignment address, potentially | ||||
284 | /// leaving one or more empty slots behind as padding. If this | ||||
285 | /// is false, the returned address might be less-aligned than | ||||
286 | /// DirectAlign. | ||||
287 | static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, | ||||
288 | Address VAListAddr, | ||||
289 | llvm::Type *DirectTy, | ||||
290 | CharUnits DirectSize, | ||||
291 | CharUnits DirectAlign, | ||||
292 | CharUnits SlotSize, | ||||
293 | bool AllowHigherAlign) { | ||||
294 | // Cast the element type to i8* if necessary. Some platforms define | ||||
295 | // va_list as a struct containing an i8* instead of just an i8*. | ||||
296 | if (VAListAddr.getElementType() != CGF.Int8PtrTy) | ||||
297 | VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); | ||||
298 | |||||
299 | llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); | ||||
300 | |||||
301 | // If the CC aligns values higher than the slot size, do so if needed. | ||||
302 | Address Addr = Address::invalid(); | ||||
303 | if (AllowHigherAlign && DirectAlign > SlotSize) { | ||||
304 | Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign), | ||||
305 | DirectAlign); | ||||
306 | } else { | ||||
307 | Addr = Address(Ptr, SlotSize); | ||||
308 | } | ||||
309 | |||||
310 | // Advance the pointer past the argument, then store that back. | ||||
311 | CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); | ||||
312 | Address NextPtr = | ||||
313 | CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next"); | ||||
314 | CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr); | ||||
315 | |||||
316 | // If the argument is smaller than a slot, and this is a big-endian | ||||
317 | // target, the argument will be right-adjusted in its slot. | ||||
318 | if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() && | ||||
319 | !DirectTy->isStructTy()) { | ||||
320 | Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); | ||||
321 | } | ||||
322 | |||||
323 | Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); | ||||
324 | return Addr; | ||||
325 | } | ||||
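// Editor's note (worked example of the big-endian adjustment above, not
// original source): an i8 argument in a 4-byte slot on a big-endian target
// sits in the slot's last byte, so the address is advanced by
// SlotSize - DirectSize = 3 bytes before the final bitcast; little-endian
// targets use the slot address unchanged.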
326 | |||||
327 | /// Emit va_arg for a platform using the common void* representation, | ||||
328 | /// where arguments are simply emitted in an array of slots on the stack. | ||||
329 | /// | ||||
330 | /// \param IsIndirect - Values of this type are passed indirectly. | ||||
331 | /// \param ValueInfo - The size and alignment of this type, generally | ||||
332 | /// computed with getContext().getTypeInfoInChars(ValueTy). | ||||
333 | /// \param SlotSizeAndAlign - The size and alignment of a stack slot. | ||||
334 | /// Each argument will be allocated to a multiple of this number of | ||||
335 | /// slots, and all the slots will be aligned to this value. | ||||
336 | /// \param AllowHigherAlign - The slot alignment is not a cap; | ||||
337 | /// an argument type with an alignment greater than the slot size | ||||
338 | /// will be emitted on a higher-alignment address, potentially | ||||
339 | /// leaving one or more empty slots behind as padding. | ||||
340 | static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
341 | QualType ValueTy, bool IsIndirect, | ||||
342 | std::pair<CharUnits, CharUnits> ValueInfo, | ||||
343 | CharUnits SlotSizeAndAlign, | ||||
344 | bool AllowHigherAlign) { | ||||
345 | // The size and alignment of the value that was passed directly. | ||||
346 | CharUnits DirectSize, DirectAlign; | ||||
347 | if (IsIndirect) { | ||||
348 | DirectSize = CGF.getPointerSize(); | ||||
349 | DirectAlign = CGF.getPointerAlign(); | ||||
350 | } else { | ||||
351 | DirectSize = ValueInfo.first; | ||||
352 | DirectAlign = ValueInfo.second; | ||||
353 | } | ||||
354 | |||||
355 | // Cast the address we've calculated to the right type. | ||||
356 | llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy); | ||||
357 | if (IsIndirect) | ||||
358 | DirectTy = DirectTy->getPointerTo(0); | ||||
359 | |||||
360 | Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, | ||||
361 | DirectSize, DirectAlign, | ||||
362 | SlotSizeAndAlign, | ||||
363 | AllowHigherAlign); | ||||
364 | |||||
365 | if (IsIndirect) { | ||||
366 | Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second); | ||||
367 | } | ||||
368 | |||||
369 | return Addr; | ||||
370 | |||||
371 | } | ||||
372 | |||||
373 | static Address emitMergePHI(CodeGenFunction &CGF, | ||||
374 | Address Addr1, llvm::BasicBlock *Block1, | ||||
375 | Address Addr2, llvm::BasicBlock *Block2, | ||||
376 | const llvm::Twine &Name = "") { | ||||
377 | assert(Addr1.getType() == Addr2.getType()); | ||||
378 | llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); | ||||
379 | PHI->addIncoming(Addr1.getPointer(), Block1); | ||||
380 | PHI->addIncoming(Addr2.getPointer(), Block2); | ||||
381 | CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); | ||||
382 | return Address(PHI, Align); | ||||
383 | } | ||||
384 | |||||
385 | TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } | ||||
386 | |||||
387 | // If someone can figure out a general rule for this, that would be great. | ||||
388 | // It's probably just doomed to be platform-dependent, though. | ||||
389 | unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { | ||||
390 | // Verified for: | ||||
391 | // x86-64 FreeBSD, Linux, Darwin | ||||
392 | // x86-32 FreeBSD, Linux, Darwin | ||||
393 | // PowerPC Linux, Darwin | ||||
394 | // ARM Darwin (*not* EABI) | ||||
395 | // AArch64 Linux | ||||
396 | return 32; | ||||
397 | } | ||||
398 | |||||
399 | bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, | ||||
400 | const FunctionNoProtoType *fnType) const { | ||||
401 | // The following conventions are known to require this to be false: | ||||
402 | // x86_stdcall | ||||
403 | // MIPS | ||||
404 | // For everything else, we just prefer false unless we opt out. | ||||
405 | return false; | ||||
406 | } | ||||
407 | |||||
408 | void | ||||
409 | TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, | ||||
410 | llvm::SmallString<24> &Opt) const { | ||||
411 | // This assumes the user is passing a library name like "rt" instead of a | ||||
412 | // filename like "librt.a/so", and that they don't care whether it's static or | ||||
413 | // dynamic. | ||||
414 | Opt = "-l"; | ||||
415 | Opt += Lib; | ||||
416 | } | ||||
417 | |||||
418 | unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const { | ||||
419 | // OpenCL kernels are called via an explicit runtime API with arguments | ||||
420 | // set with clSetKernelArg(), not as normal sub-functions. | ||||
421 | // Return SPIR_KERNEL by default as the kernel calling convention to | ||||
422 | // ensure the fingerprint is fixed such way that each OpenCL argument | ||||
423 | // gets one matching argument in the produced kernel function argument | ||||
424 | // list to enable feasible implementation of clSetKernelArg() with | ||||
425 | // aggregates etc. In case we would use the default C calling conv here, | ||||
426 | // clSetKernelArg() might break depending on the target-specific | ||||
427 | // conventions; different targets might split structs passed as values | ||||
428 | // to multiple function arguments etc. | ||||
429 | return llvm::CallingConv::SPIR_KERNEL; | ||||
430 | } | ||||
431 | |||||
432 | llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM, | ||||
433 | llvm::PointerType *T, QualType QT) const { | ||||
434 | return llvm::ConstantPointerNull::get(T); | ||||
435 | } | ||||
436 | |||||
437 | LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, | ||||
438 | const VarDecl *D) const { | ||||
439 | assert(!CGM.getLangOpts().OpenCL && | ||||
440 | !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && | ||||
441 | "Address space agnostic languages only"); | ||||
442 | return D ? D->getType().getAddressSpace() : LangAS::Default; | ||||
443 | } | ||||
444 | |||||
445 | llvm::Value *TargetCodeGenInfo::performAddrSpaceCast( | ||||
446 | CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr, | ||||
447 | LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const { | ||||
448 | // Since target may map different address spaces in AST to the same address | ||||
449 | // space, an address space conversion may end up as a bitcast. | ||||
450 | if (auto *C = dyn_cast<llvm::Constant>(Src)) | ||||
451 | return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy); | ||||
452 | // Try to preserve the source's name to make IR more readable. | ||||
453 | return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( | ||||
454 | Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : ""); | ||||
455 | } | ||||
456 | |||||
457 | llvm::Constant * | ||||
458 | TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src, | ||||
459 | LangAS SrcAddr, LangAS DestAddr, | ||||
460 | llvm::Type *DestTy) const { | ||||
461 | // Since target may map different address spaces in AST to the same address | ||||
462 | // space, an address space conversion may end up as a bitcast. | ||||
463 | return llvm::ConstantExpr::getPointerCast(Src, DestTy); | ||||
464 | } | ||||
465 | |||||
466 | llvm::SyncScope::ID | ||||
467 | TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, | ||||
468 | SyncScope Scope, | ||||
469 | llvm::AtomicOrdering Ordering, | ||||
470 | llvm::LLVMContext &Ctx) const { | ||||
471 | return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */ | ||||
472 | } | ||||
473 | |||||
474 | static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); | ||||
475 | |||||
476 | /// isEmptyField - Return true iff the field is "empty", that is, it | ||||
477 | /// is an unnamed bit-field or an (array of) empty record(s). | ||||
478 | static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, | ||||
479 | bool AllowArrays) { | ||||
480 | if (FD->isUnnamedBitfield()) | ||||
481 | return true; | ||||
482 | |||||
483 | QualType FT = FD->getType(); | ||||
484 | |||||
485 | // Constant arrays of empty records count as empty, strip them off. | ||||
486 | // Constant arrays of zero length always count as empty. | ||||
487 | if (AllowArrays) | ||||
488 | while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { | ||||
489 | if (AT->getSize() == 0) | ||||
490 | return true; | ||||
491 | FT = AT->getElementType(); | ||||
492 | } | ||||
493 | |||||
494 | const RecordType *RT = FT->getAs<RecordType>(); | ||||
495 | if (!RT) | ||||
496 | return false; | ||||
497 | |||||
498 | // C++ record fields are never empty, at least in the Itanium ABI. | ||||
499 | // | ||||
500 | // FIXME: We should use a predicate for whether this behavior is true in the | ||||
501 | // current ABI. | ||||
502 | if (isa<CXXRecordDecl>(RT->getDecl())) | ||||
503 | return false; | ||||
504 | |||||
505 | return isEmptyRecord(Context, FT, AllowArrays); | ||||
506 | } | ||||
507 | |||||
508 | /// isEmptyRecord - Return true iff a structure contains only empty | ||||
509 | /// fields. Note that a structure with a flexible array member is not | ||||
510 | /// considered empty. | ||||
511 | static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { | ||||
512 | const RecordType *RT = T->getAs<RecordType>(); | ||||
513 | if (!RT) | ||||
514 | return false; | ||||
515 | const RecordDecl *RD = RT->getDecl(); | ||||
516 | if (RD->hasFlexibleArrayMember()) | ||||
517 | return false; | ||||
518 | |||||
519 | // If this is a C++ record, check the bases first. | ||||
520 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) | ||||
521 | for (const auto &I : CXXRD->bases()) | ||||
522 | if (!isEmptyRecord(Context, I.getType(), true)) | ||||
523 | return false; | ||||
524 | |||||
525 | for (const auto *I : RD->fields()) | ||||
526 | if (!isEmptyField(Context, I, AllowArrays)) | ||||
527 | return false; | ||||
528 | return true; | ||||
529 | } | ||||
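// Editor's illustration (hypothetical C declarations, not from this file):
//   struct A { int : 0; };         // only an unnamed bit-field -> empty
//   struct B { struct A a[4]; };   // array of empty C records -> empty
//   struct C { int x; };           // named scalar field -> not empty
// Note that per isEmptyField above, a field of C++ class type is never
// treated as empty, so the B case holds only for plain C records.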
530 | |||||
531 | /// isSingleElementStruct - Determine if a structure is a "single | ||||
532 | /// element struct", i.e. it has exactly one non-empty field or | ||||
533 | /// exactly one field which is itself a single element | ||||
534 | /// struct. Structures with flexible array members are never | ||||
535 | /// considered single element structs. | ||||
536 | /// | ||||
537 | /// \return The field declaration for the single non-empty field, if | ||||
538 | /// it exists. | ||||
539 | static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { | ||||
540 | const RecordType *RT = T->getAs<RecordType>(); | ||||
541 | if (!RT) | ||||
542 | return nullptr; | ||||
543 | |||||
544 | const RecordDecl *RD = RT->getDecl(); | ||||
545 | if (RD->hasFlexibleArrayMember()) | ||||
546 | return nullptr; | ||||
547 | |||||
548 | const Type *Found = nullptr; | ||||
549 | |||||
550 | // If this is a C++ record, check the bases first. | ||||
551 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { | ||||
552 | for (const auto &I : CXXRD->bases()) { | ||||
553 | // Ignore empty records. | ||||
554 | if (isEmptyRecord(Context, I.getType(), true)) | ||||
555 | continue; | ||||
556 | |||||
557 | // If we already found an element then this isn't a single-element struct. | ||||
558 | if (Found) | ||||
559 | return nullptr; | ||||
560 | |||||
561 | // If this is non-empty and not a single element struct, the composite | ||||
562 | // cannot be a single element struct. | ||||
563 | Found = isSingleElementStruct(I.getType(), Context); | ||||
564 | if (!Found) | ||||
565 | return nullptr; | ||||
566 | } | ||||
567 | } | ||||
568 | |||||
569 | // Check for single element. | ||||
570 | for (const auto *FD : RD->fields()) { | ||||
571 | QualType FT = FD->getType(); | ||||
572 | |||||
573 | // Ignore empty fields. | ||||
574 | if (isEmptyField(Context, FD, true)) | ||||
575 | continue; | ||||
576 | |||||
577 | // If we already found an element then this isn't a single-element | ||||
578 | // struct. | ||||
579 | if (Found) | ||||
580 | return nullptr; | ||||
581 | |||||
582 | // Treat single element arrays as the element. | ||||
583 | while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { | ||||
584 | if (AT->getSize().getZExtValue() != 1) | ||||
585 | break; | ||||
586 | FT = AT->getElementType(); | ||||
587 | } | ||||
588 | |||||
589 | if (!isAggregateTypeForABI(FT)) { | ||||
590 | Found = FT.getTypePtr(); | ||||
591 | } else { | ||||
592 | Found = isSingleElementStruct(FT, Context); | ||||
593 | if (!Found) | ||||
594 | return nullptr; | ||||
595 | } | ||||
596 | } | ||||
597 | |||||
598 | // We don't consider a struct a single-element struct if it has | ||||
599 | // padding beyond the element type. | ||||
600 | if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) | ||||
601 | return nullptr; | ||||
602 | |||||
603 | return Found; | ||||
604 | } | ||||
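// Editor's illustration (hypothetical C declarations, not from this file):
//   struct S { float f; };         // -> the 'float' type
//   struct T { struct S s; };      // -> 'float', found by recursion
//   struct U { float f[1]; };      // -> 'float', length-1 array peeled
//   struct V { float f; int i; };  // -> nullptr, two non-empty fields
// The final size comparison also rejects any struct with tail padding
// beyond its single element.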
605 | |||||
606 | namespace { | ||||
607 | Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, | ||||
608 | const ABIArgInfo &AI) { | ||||
609 | // This default implementation defers to the llvm backend's va_arg | ||||
610 | // instruction. It can handle only passing arguments directly | ||||
611 | // (typically only handled in the backend for primitive types), or | ||||
612 | // aggregates passed indirectly by pointer (NOTE: if the "byval" | ||||
613 | // flag has ABI impact in the callee, this implementation cannot | ||||
614 | // work.) | ||||
615 | |||||
616 | // Only a few cases are covered here at the moment -- those needed | ||||
617 | // by the default abi. | ||||
618 | llvm::Value *Val; | ||||
619 | |||||
620 | if (AI.isIndirect()) { | ||||
621 | assert(!AI.getPaddingType() && | ||||
622 | "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); | ||||
623 | assert( | ||||
624 | !AI.getIndirectRealign() && | ||||
625 | "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!"); | ||||
626 | |||||
627 | auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); | ||||
628 | CharUnits TyAlignForABI = TyInfo.second; | ||||
629 | |||||
630 | llvm::Type *BaseTy = | ||||
631 | llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); | ||||
632 | llvm::Value *Addr = | ||||
633 | CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); | ||||
634 | return Address(Addr, TyAlignForABI); | ||||
635 | } else { | ||||
636 | assert((AI.isDirect() || AI.isExtend()) && | ||||
637 | "Unexpected ArgInfo Kind in generic VAArg emitter!"); | ||||
638 | |||||
639 | assert(!AI.getInReg() && | ||||
640 | "Unexpected InReg seen in arginfo in generic VAArg emitter!"); | ||||
641 | assert(!AI.getPaddingType() && | ||||
642 | "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); | ||||
643 | assert(!AI.getDirectOffset() && | ||||
644 | "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!"); | ||||
645 | assert(!AI.getCoerceToType() && | ||||
646 | "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); | ||||
647 | |||||
648 | Address Temp = CGF.CreateMemTemp(Ty, "varet"); | ||||
649 | Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); | ||||
650 | CGF.Builder.CreateStore(Val, Temp); | ||||
651 | return Temp; | ||||
652 | } | ||||
653 | } | ||||
654 | |||||
655 | /// DefaultABIInfo - The default implementation for ABI specific | ||||
656 | /// details. This implementation provides information which results in | ||||
657 | /// self-consistent and sensible LLVM IR generation, but does not | ||||
658 | /// conform to any particular ABI. | ||||
659 | class DefaultABIInfo : public ABIInfo { | ||||
660 | public: | ||||
661 | DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} | ||||
662 | |||||
663 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
664 | ABIArgInfo classifyArgumentType(QualType RetTy) const; | ||||
665 | |||||
666 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
667 | if (!getCXXABI().classifyReturnType(FI)) | ||||
668 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
669 | for (auto &I : FI.arguments()) | ||||
670 | I.info = classifyArgumentType(I.type); | ||||
671 | } | ||||
672 | |||||
673 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
674 | QualType Ty) const override { | ||||
675 | return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); | ||||
676 | } | ||||
677 | }; | ||||
678 | |||||
679 | class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
680 | public: | ||||
681 | DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) | ||||
682 | : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} | ||||
683 | }; | ||||
684 | |||||
685 | ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { | ||||
686 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
687 | |||||
688 | if (isAggregateTypeForABI(Ty)) { | ||||
689 | // Records with non-trivial destructors/copy-constructors should not be | ||||
690 | // passed by value. | ||||
691 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
692 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
693 | |||||
694 | return getNaturalAlignIndirect(Ty); | ||||
695 | } | ||||
696 | |||||
697 | // Treat an enum type as its underlying type. | ||||
698 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
699 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
700 | |||||
701 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
702 | : ABIArgInfo::getDirect()); | ||||
703 | } | ||||
704 | |||||
705 | ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { | ||||
706 | if (RetTy->isVoidType()) | ||||
707 | return ABIArgInfo::getIgnore(); | ||||
708 | |||||
709 | if (isAggregateTypeForABI(RetTy)) | ||||
710 | return getNaturalAlignIndirect(RetTy); | ||||
711 | |||||
712 | // Treat an enum type as its underlying type. | ||||
713 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
714 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
715 | |||||
716 | return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) | ||||
717 | : ABIArgInfo::getDirect()); | ||||
718 | } | ||||
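// Editor's summary (illustrative outcomes under DefaultABIInfo, not
// original source): returning 'void' -> Ignore; 'short', a promotable
// integer -> Extend; 'int' or 'double' -> Direct; any aggregate ->
// indirect at its natural alignment.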
719 | |||||
720 | //===----------------------------------------------------------------------===// | ||||
721 | // WebAssembly ABI Implementation | ||||
722 | // | ||||
723 | // This is a very simple ABI that relies a lot on DefaultABIInfo. | ||||
724 | //===----------------------------------------------------------------------===// | ||||
725 | |||||
726 | class WebAssemblyABIInfo final : public SwiftABIInfo { | ||||
727 | DefaultABIInfo defaultInfo; | ||||
728 | |||||
729 | public: | ||||
730 | explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT) | ||||
731 | : SwiftABIInfo(CGT), defaultInfo(CGT) {} | ||||
732 | |||||
733 | private: | ||||
734 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
735 | ABIArgInfo classifyArgumentType(QualType Ty) const; | ||||
736 | |||||
737 | // DefaultABIInfo's classifyReturnType and classifyArgumentType are | ||||
738 | // non-virtual, but computeInfo and EmitVAArg are virtual, so we | ||||
739 | // overload them. | ||||
740 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
741 | if (!getCXXABI().classifyReturnType(FI)) | ||||
742 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
743 | for (auto &Arg : FI.arguments()) | ||||
744 | Arg.info = classifyArgumentType(Arg.type); | ||||
745 | } | ||||
746 | |||||
747 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
748 | QualType Ty) const override; | ||||
749 | |||||
750 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
751 | bool asReturnValue) const override { | ||||
752 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
753 | } | ||||
754 | |||||
755 | bool isSwiftErrorInRegister() const override { | ||||
756 | return false; | ||||
757 | } | ||||
758 | }; | ||||
759 | |||||
760 | class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { | ||||
761 | public: | ||||
762 | explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) | ||||
763 | : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {} | ||||
764 | |||||
765 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
766 | CodeGen::CodeGenModule &CGM) const override { | ||||
767 | TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); | ||||
768 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { | ||||
769 | if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) { | ||||
770 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
771 | llvm::AttrBuilder B; | ||||
772 | B.addAttribute("wasm-import-module", Attr->getImportModule()); | ||||
773 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); | ||||
774 | } | ||||
775 | if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) { | ||||
776 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
777 | llvm::AttrBuilder B; | ||||
778 | B.addAttribute("wasm-import-name", Attr->getImportName()); | ||||
779 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); | ||||
780 | } | ||||
781 | } | ||||
782 | |||||
783 | if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { | ||||
784 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
785 | if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype()) | ||||
786 | Fn->addFnAttr("no-prototype"); | ||||
787 | } | ||||
788 | } | ||||
789 | }; | ||||
790 | |||||
791 | /// Classify argument of given type \p Ty. | ||||
792 | ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { | ||||
793 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
794 | |||||
795 | if (isAggregateTypeForABI(Ty)) { | ||||
796 | // Records with non-trivial destructors/copy-constructors should not be | ||||
797 | // passed by value. | ||||
798 | if (auto RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
799 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
800 | // Ignore empty structs/unions. | ||||
801 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
802 | return ABIArgInfo::getIgnore(); | ||||
803 | // Lower single-element structs to just pass a regular value. TODO: We | ||||
804 | // could do reasonable-size multiple-element structs too, using getExpand(), | ||||
805 | // though watch out for things like bitfields. | ||||
806 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) | ||||
807 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); | ||||
808 | } | ||||
809 | |||||
810 | // Otherwise just do the default thing. | ||||
811 | return defaultInfo.classifyArgumentType(Ty); | ||||
812 | } | ||||
813 | |||||
814 | ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { | ||||
815 | if (isAggregateTypeForABI(RetTy)) { | ||||
816 | // Records with non-trivial destructors/copy-constructors should not be | ||||
817 | // returned by value. | ||||
818 | if (!getRecordArgABI(RetTy, getCXXABI())) { | ||||
819 | // Ignore empty structs/unions. | ||||
820 | if (isEmptyRecord(getContext(), RetTy, true)) | ||||
821 | return ABIArgInfo::getIgnore(); | ||||
822 | // Lower single-element structs to just return a regular value. TODO: We | ||||
823 | // could do reasonable-size multiple-element structs too, using | ||||
824 | // ABIArgInfo::getDirect(). | ||||
825 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) | ||||
826 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); | ||||
827 | } | ||||
828 | } | ||||
829 | |||||
830 | // Otherwise just do the default thing. | ||||
831 | return defaultInfo.classifyReturnType(RetTy); | ||||
832 | } | ||||
833 | |||||
834 | Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
835 | QualType Ty) const { | ||||
836 | bool IsIndirect = isAggregateTypeForABI(Ty) && | ||||
837 | !isEmptyRecord(getContext(), Ty, true) && | ||||
838 | !isSingleElementStruct(Ty, getContext()); | ||||
839 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, | ||||
840 | getContext().getTypeInfoInChars(Ty), | ||||
841 | CharUnits::fromQuantity(4), | ||||
842 | /*AllowHigherAlign=*/true); | ||||
843 | } | ||||
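// Editor's illustration (hypothetical types, not from this file): for
// 'struct Pair { int a, b; };' va_arg on WebAssembly goes indirect (a
// pointer lives in the 4-byte slot), while 'struct One { double d; };' is
// a single-element struct and is read directly from the argument area.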
844 | |||||
845 | //===----------------------------------------------------------------------===// | ||||
846 | // le32/PNaCl bitcode ABI Implementation | ||||
847 | // | ||||
848 | // This is a simplified version of the x86_32 ABI. Arguments and return values | ||||
849 | // are always passed on the stack. | ||||
850 | //===----------------------------------------------------------------------===// | ||||
851 | |||||
852 | class PNaClABIInfo : public ABIInfo { | ||||
853 | public: | ||||
854 | PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} | ||||
855 | |||||
856 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
857 | ABIArgInfo classifyArgumentType(QualType RetTy) const; | ||||
858 | |||||
859 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
860 | Address EmitVAArg(CodeGenFunction &CGF, | ||||
861 | Address VAListAddr, QualType Ty) const override; | ||||
862 | }; | ||||
863 | |||||
864 | class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
865 | public: | ||||
866 | PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) | ||||
867 | : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} | ||||
868 | }; | ||||
869 | |||||
870 | void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
871 | if (!getCXXABI().classifyReturnType(FI)) | ||||
872 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
873 | |||||
874 | for (auto &I : FI.arguments()) | ||||
875 | I.info = classifyArgumentType(I.type); | ||||
876 | } | ||||
877 | |||||
878 | Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
879 | QualType Ty) const { | ||||
880 | // The PNaCL ABI is a bit odd, in that varargs don't use normal | ||||
881 | // function classification. Structs get passed directly for varargs | ||||
882 | // functions, through a rewriting transform in | ||||
883 | // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows | ||||
884 | // this target to actually support va_arg instructions with an | ||||
885 | // aggregate type, unlike other targets. | ||||
886 | return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); | ||||
887 | } | ||||
888 | |||||
889 | /// Classify argument of given type \p Ty. | ||||
890 | ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { | ||||
891 | if (isAggregateTypeForABI(Ty)) { | ||||
892 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
893 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
894 | return getNaturalAlignIndirect(Ty); | ||||
895 | } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { | ||||
896 | // Treat an enum type as its underlying type. | ||||
897 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
898 | } else if (Ty->isFloatingType()) { | ||||
899 | // Floating-point types don't go inreg. | ||||
900 | return ABIArgInfo::getDirect(); | ||||
901 | } | ||||
902 | |||||
903 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
904 | : ABIArgInfo::getDirect()); | ||||
905 | } | ||||
906 | |||||
907 | ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { | ||||
908 | if (RetTy->isVoidType()) | ||||
909 | return ABIArgInfo::getIgnore(); | ||||
910 | |||||
911 | // In the PNaCl ABI we always return records/structures on the stack. | ||||
912 | if (isAggregateTypeForABI(RetTy)) | ||||
913 | return getNaturalAlignIndirect(RetTy); | ||||
914 | |||||
915 | // Treat an enum type as its underlying type. | ||||
916 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
917 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
918 | |||||
919 | return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) | ||||
920 | : ABIArgInfo::getDirect()); | ||||
921 | } | ||||
922 | |||||
923 | /// IsX86_MMXType - Return true if this is an MMX type. | ||||
924 | bool IsX86_MMXType(llvm::Type *IRType) { | ||||
925 | // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. | ||||
926 | return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && | ||||
927 | cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && | ||||
928 | IRType->getScalarSizeInBits() != 64; | ||||
929 | } | ||||
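// Editor's note (examples for the predicate above, not original source):
// <2 x i32>, <4 x i16>, and <8 x i8> are MMX types; <1 x i64> fails the
// scalar-size check, and <2 x float> fails the integer-element check.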
930 | |||||
931 | static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, | ||||
932 | StringRef Constraint, | ||||
933 | llvm::Type* Ty) { | ||||
934 | bool IsMMXCons = llvm::StringSwitch<bool>(Constraint) | ||||
935 | .Cases("y", "&y", "^Ym", true) | ||||
936 | .Default(false); | ||||
937 | if (IsMMXCons && Ty->isVectorTy()) { | ||||
938 | if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) { | ||||
939 | // Invalid MMX constraint | ||||
940 | return nullptr; | ||||
941 | } | ||||
942 | |||||
943 | return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); | ||||
944 | } | ||||
945 | |||||
946 | // No operation needed | ||||
947 | return Ty; | ||||
948 | } | ||||
949 | |||||
950 | /// Returns true if this type can be passed in SSE registers with the | ||||
951 | /// X86_VectorCall calling convention. Shared between x86_32 and x86_64. | ||||
952 | static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { | ||||
953 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { | ||||
954 | if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) { | ||||
955 | if (BT->getKind() == BuiltinType::LongDouble) { | ||||
956 | if (&Context.getTargetInfo().getLongDoubleFormat() == | ||||
957 | &llvm::APFloat::x87DoubleExtended()) | ||||
958 | return false; | ||||
959 | } | ||||
960 | return true; | ||||
961 | } | ||||
962 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
963 | // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX | ||||
964 | // registers specially. | ||||
965 | unsigned VecSize = Context.getTypeSize(VT); | ||||
966 | if (VecSize == 128 || VecSize == 256 || VecSize == 512) | ||||
967 | return true; | ||||
968 | } | ||||
969 | return false; | ||||
970 | } | ||||
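// Editor's note (illustrative outcomes, not original source): 'float' and
// 'double' qualify; 'long double' is rejected on targets where it uses the
// 80-bit x87 format; '__fp16' (Half) is rejected; vectors qualify only at
// exactly 128, 256, or 512 bits (XMM/YMM/ZMM sizes).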
971 | |||||
972 | /// Returns true if this aggregate is small enough to be passed in SSE registers | ||||
973 | /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64. | ||||
974 | static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { | ||||
975 | return NumMembers <= 4; | ||||
976 | } | ||||
977 | |||||
978 | /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86. | ||||
979 | static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { | ||||
980 | auto AI = ABIArgInfo::getDirect(T); | ||||
981 | AI.setInReg(true); | ||||
982 | AI.setCanBeFlattened(false); | ||||
983 | return AI; | ||||
984 | } | ||||
985 | |||||
986 | //===----------------------------------------------------------------------===// | ||||
987 | // X86-32 ABI Implementation | ||||
988 | //===----------------------------------------------------------------------===// | ||||
989 | |||||
990 | /// Similar to llvm::CCState, but for Clang. | ||||
991 | struct CCState { | ||||
992 | CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {} | ||||
993 | |||||
994 | unsigned CC; | ||||
995 | unsigned FreeRegs; | ||||
996 | unsigned FreeSSERegs; | ||||
997 | }; | ||||
998 | |||||
999 | enum { | ||||
1000 | // Vectorcall only allows the first 6 parameters to be passed in registers. | ||||
1001 | VectorcallMaxParamNumAsReg = 6 | ||||
1002 | }; | ||||
1003 | |||||
1004 | /// X86_32ABIInfo - The X86-32 ABI information. | ||||
1005 | class X86_32ABIInfo : public SwiftABIInfo { | ||||
1006 | enum Class { | ||||
1007 | Integer, | ||||
1008 | Float | ||||
1009 | }; | ||||
1010 | |||||
1011 | static const unsigned MinABIStackAlignInBytes = 4; | ||||
1012 | |||||
1013 | bool IsDarwinVectorABI; | ||||
1014 | bool IsRetSmallStructInRegABI; | ||||
1015 | bool IsWin32StructABI; | ||||
1016 | bool IsSoftFloatABI; | ||||
1017 | bool IsMCUABI; | ||||
1018 | unsigned DefaultNumRegisterParameters; | ||||
1019 | |||||
1020 | static bool isRegisterSize(unsigned Size) { | ||||
1021 | return (Size == 8 || Size == 16 || Size == 32 || Size == 64); | ||||
1022 | } | ||||
1023 | |||||
1024 | bool isHomogeneousAggregateBaseType(QualType Ty) const override { | ||||
1025 | // FIXME: Assumes vectorcall is in use. | ||||
1026 | return isX86VectorTypeForVectorCall(getContext(), Ty); | ||||
1027 | } | ||||
1028 | |||||
1029 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, | ||||
1030 | uint64_t NumMembers) const override { | ||||
1031 | // FIXME: Assumes vectorcall is in use. | ||||
1032 | return isX86VectorCallAggregateSmallEnough(NumMembers); | ||||
1033 | } | ||||
1034 | |||||
1035 | bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; | ||||
1036 | |||||
1037 | /// getIndirectResult - Given a source type \arg Ty, return a suitable result | ||||
1038 | /// such that the argument will be passed in memory. | ||||
1039 | ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; | ||||
1040 | |||||
1041 | ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; | ||||
1042 | |||||
1043 | /// Return the alignment to use for the given type on the stack. | ||||
1044 | unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; | ||||
1045 | |||||
1046 | Class classify(QualType Ty) const; | ||||
1047 | ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; | ||||
1048 | ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; | ||||
1049 | |||||
1050 | /// Updates the number of available free registers, returns | ||||
1051 | /// true if any registers were allocated. | ||||
1052 | bool updateFreeRegs(QualType Ty, CCState &State) const; | ||||
1053 | |||||
1054 | bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg, | ||||
1055 | bool &NeedsPadding) const; | ||||
1056 | bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const; | ||||
1057 | |||||
1058 | bool canExpandIndirectArgument(QualType Ty) const; | ||||
1059 | |||||
1060 | /// Rewrite the function info so that all memory arguments use | ||||
1061 | /// inalloca. | ||||
1062 | void rewriteWithInAlloca(CGFunctionInfo &FI) const; | ||||
1063 | |||||
1064 | void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, | ||||
1065 | CharUnits &StackOffset, ABIArgInfo &Info, | ||||
1066 | QualType Type) const; | ||||
1067 | void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, | ||||
1068 | bool &UsedInAlloca) const; | ||||
1069 | |||||
1070 | public: | ||||
1071 | |||||
1072 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
1073 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
1074 | QualType Ty) const override; | ||||
1075 | |||||
1076 | X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, | ||||
1077 | bool RetSmallStructInRegABI, bool Win32StructABI, | ||||
1078 | unsigned NumRegisterParameters, bool SoftFloatABI) | ||||
1079 | : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), | ||||
1080 | IsRetSmallStructInRegABI(RetSmallStructInRegABI), | ||||
1081 | IsWin32StructABI(Win32StructABI), | ||||
1082 | IsSoftFloatABI(SoftFloatABI), | ||||
1083 | IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()), | ||||
1084 | DefaultNumRegisterParameters(NumRegisterParameters) {} | ||||
1085 | |||||
1086 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
1087 | bool asReturnValue) const override { | ||||
1088 | // LLVM's x86-32 lowering currently only assigns up to three | ||||
1089 | // integer registers and three fp registers. Oddly, it'll use up to | ||||
1090 | // four vector registers for vectors, but those can overlap with the | ||||
1091 | // scalar registers. | ||||
1092 | return occupiesMoreThan(CGT, scalars, /*total*/ 3); | ||||
1093 | } | ||||
1094 | |||||
1095 | bool isSwiftErrorInRegister() const override { | ||||
1096 | // x86-32 lowering does not support passing swifterror in a register. | ||||
1097 | return false; | ||||
1098 | } | ||||
1099 | }; | ||||
1100 | |||||
1101 | class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
1102 | public: | ||||
1103 | X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, | ||||
1104 | bool RetSmallStructInRegABI, bool Win32StructABI, | ||||
1105 | unsigned NumRegisterParameters, bool SoftFloatABI) | ||||
1106 | : TargetCodeGenInfo(new X86_32ABIInfo( | ||||
1107 | CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, | ||||
1108 | NumRegisterParameters, SoftFloatABI)) {} | ||||
1109 | |||||
1110 | static bool isStructReturnInRegABI( | ||||
1111 | const llvm::Triple &Triple, const CodeGenOptions &Opts); | ||||
1112 | |||||
1113 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
1114 | CodeGen::CodeGenModule &CGM) const override; | ||||
1115 | |||||
1116 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { | ||||
1117 | // Darwin uses different dwarf register numbers for EH. | ||||
1118 | if (CGM.getTarget().getTriple().isOSDarwin()) return 5; | ||||
1119 | return 4; | ||||
1120 | } | ||||
1121 | |||||
1122 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
1123 | llvm::Value *Address) const override; | ||||
1124 | |||||
1125 | llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, | ||||
1126 | StringRef Constraint, | ||||
1127 | llvm::Type* Ty) const override { | ||||
1128 | return X86AdjustInlineAsmType(CGF, Constraint, Ty); | ||||
1129 | } | ||||
1130 | |||||
1131 | void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, | ||||
1132 | std::string &Constraints, | ||||
1133 | std::vector<llvm::Type *> &ResultRegTypes, | ||||
1134 | std::vector<llvm::Type *> &ResultTruncRegTypes, | ||||
1135 | std::vector<LValue> &ResultRegDests, | ||||
1136 | std::string &AsmString, | ||||
1137 | unsigned NumOutputs) const override; | ||||
1138 | |||||
1139 | llvm::Constant * | ||||
1140 | getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { | ||||
1141 | unsigned Sig = (0xeb << 0) | // jmp rel8 | ||||
1142 | (0x06 << 8) | // .+0x08 | ||||
1143 | ('v' << 16) | | ||||
1144 | ('2' << 24); | ||||
1145 | return llvm::ConstantInt::get(CGM.Int32Ty, Sig); | ||||
1146 | } | ||||
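// Worked example (illustrative, not part of the original source): the four
// bytes 0xeb, 0x06, 'v', '2' are packed little-endian into one i32. Decoded
// as x86, "0xeb 0x06" is a "jmp .+8" (two instruction bytes plus a rel8 of
// 6), which skips over the "v2" signature characters that UBSan expects to
// find just before the function entry point.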
1147 | |||||
1148 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { | ||||
1149 | return "movl\t%ebp, %ebp" | ||||
1150 | "\t\t// marker for objc_retainAutoreleaseReturnValue"; | ||||
1151 | } | ||||
1152 | }; | ||||
1153 | |||||
1154 | } | ||||
1155 | |||||
1156 | /// Rewrite input constraint references after adding some output constraints. | ||||
1157 | /// In the case where there is one output and one input and we add one output, | ||||
1158 | /// we need to replace all operand references greater than or equal to 1: | ||||
1159 | /// mov $0, $1 | ||||
1160 | /// mov eax, $1 | ||||
1161 | /// The result will be: | ||||
1162 | /// mov $0, $2 | ||||
1163 | /// mov eax, $2 | ||||
1164 | static void rewriteInputConstraintReferences(unsigned FirstIn, | ||||
1165 | unsigned NumNewOuts, | ||||
1166 | std::string &AsmString) { | ||||
1167 | std::string Buf; | ||||
1168 | llvm::raw_string_ostream OS(Buf); | ||||
1169 | size_t Pos = 0; | ||||
1170 | while (Pos < AsmString.size()) { | ||||
1171 | size_t DollarStart = AsmString.find('$', Pos); | ||||
1172 | if (DollarStart == std::string::npos) | ||||
1173 | DollarStart = AsmString.size(); | ||||
1174 | size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); | ||||
1175 | if (DollarEnd == std::string::npos) | ||||
1176 | DollarEnd = AsmString.size(); | ||||
1177 | OS << StringRef(&AsmString[Pos], DollarEnd - Pos); | ||||
1178 | Pos = DollarEnd; | ||||
1179 | size_t NumDollars = DollarEnd - DollarStart; | ||||
1180 | if (NumDollars % 2 != 0 && Pos < AsmString.size()) { | ||||
1181 | // We have an operand reference. | ||||
1182 | size_t DigitStart = Pos; | ||||
1183 | size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); | ||||
1184 | if (DigitEnd == std::string::npos) | ||||
1185 | DigitEnd = AsmString.size(); | ||||
1186 | StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); | ||||
1187 | unsigned OperandIndex; | ||||
1188 | if (!OperandStr.getAsInteger(10, OperandIndex)) { | ||||
1189 | if (OperandIndex >= FirstIn) | ||||
1190 | OperandIndex += NumNewOuts; | ||||
1191 | OS << OperandIndex; | ||||
1192 | } else { | ||||
1193 | OS << OperandStr; | ||||
1194 | } | ||||
1195 | Pos = DigitEnd; | ||||
1196 | } | ||||
1197 | } | ||||
1198 | AsmString = std::move(OS.str()); | ||||
1199 | } | ||||
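// Worked example (illustrative): with one original output and one added
// output, FirstIn == 1 and NumNewOuts == 1, so
//   "mov $0, $1\n\tmov eax, $1"  becomes  "mov $0, $2\n\tmov eax, $2".
// A "$$1" would be left untouched: an even run of dollars is an escaped
// '$', not an operand reference.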
1200 | |||||
1201 | /// Add output constraints for EAX:EDX because they are return registers. | ||||
1202 | void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( | ||||
1203 | CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, | ||||
1204 | std::vector<llvm::Type *> &ResultRegTypes, | ||||
1205 | std::vector<llvm::Type *> &ResultTruncRegTypes, | ||||
1206 | std::vector<LValue> &ResultRegDests, std::string &AsmString, | ||||
1207 | unsigned NumOutputs) const { | ||||
1208 | uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); | ||||
1209 | |||||
1210 | // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is | ||||
1211 | // larger. | ||||
1212 | if (!Constraints.empty()) | ||||
1213 | Constraints += ','; | ||||
1214 | if (RetWidth <= 32) { | ||||
1215 | Constraints += "={eax}"; | ||||
1216 | ResultRegTypes.push_back(CGF.Int32Ty); | ||||
1217 | } else { | ||||
1218 | // Use the 'A' constraint for EAX:EDX. | ||||
1219 | Constraints += "=A"; | ||||
1220 | ResultRegTypes.push_back(CGF.Int64Ty); | ||||
1221 | } | ||||
1222 | |||||
1223 | // Truncate EAX or EAX:EDX to an integer of the appropriate size. | ||||
1224 | llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); | ||||
1225 | ResultTruncRegTypes.push_back(CoerceTy); | ||||
1226 | |||||
1227 | // Coerce the integer by bitcasting the return slot pointer. | ||||
1228 | ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(), | ||||
1229 | CoerceTy->getPointerTo())); | ||||
1230 | ResultRegDests.push_back(ReturnSlot); | ||||
1231 | |||||
1232 | rewriteInputConstraintReferences(NumOutputs, 1, AsmString); | ||||
1233 | } | ||||
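// Illustrative example: for an asm statement whose result type is
// 'long long' (RetWidth == 64), the code above appends "=A", records i64 as
// the register type, and bitcasts the return slot to i64* so the EAX:EDX
// pair can be stored through it; a 32-bit result would instead use "={eax}"
// with a truncation to the exact width.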
1234 | |||||
1235 | /// shouldReturnTypeInRegister - Determine if the given type should be | ||||
1236 | /// returned in a register (for the Darwin and MCU ABI). | ||||
1237 | bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, | ||||
1238 | ASTContext &Context) const { | ||||
1239 | uint64_t Size = Context.getTypeSize(Ty); | ||||
1240 | |||||
1241 | // For i386, type must be register sized. | ||||
1242 | // For the MCU ABI, it only needs to be <= 8 bytes. | ||||
1243 | if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size))) | ||||
1244 | return false; | ||||
1245 | |||||
1246 | if (Ty->isVectorType()) { | ||||
1247 | // 64- and 128-bit vectors inside structures are not returned in | ||||
1248 | // registers. | ||||
1249 | if (Size == 64 || Size == 128) | ||||
1250 | return false; | ||||
1251 | |||||
1252 | return true; | ||||
1253 | } | ||||
1254 | |||||
1255 | // If this is a builtin, pointer, enum, complex type, member pointer, or | ||||
1256 | // member function pointer it is ok. | ||||
1257 | if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || | ||||
1258 | Ty->isAnyComplexType() || Ty->isEnumeralType() || | ||||
1259 | Ty->isBlockPointerType() || Ty->isMemberPointerType()) | ||||
1260 | return true; | ||||
1261 | |||||
1262 | // Arrays are treated like records. | ||||
1263 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) | ||||
1264 | return shouldReturnTypeInRegister(AT->getElementType(), Context); | ||||
1265 | |||||
1266 | // Otherwise, it must be a record type. | ||||
1267 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
1268 | if (!RT) return false; | ||||
1269 | |||||
1270 | // FIXME: Traverse bases here too. | ||||
1271 | |||||
1272 | // Structure types are passed in register if all fields would be | ||||
1273 | // passed in a register. | ||||
1274 | for (const auto *FD : RT->getDecl()->fields()) { | ||||
1275 | // Empty fields are ignored. | ||||
1276 | if (isEmptyField(Context, FD, true)) | ||||
1277 | continue; | ||||
1278 | |||||
1279 | // Check fields recursively. | ||||
1280 | if (!shouldReturnTypeInRegister(FD->getType(), Context)) | ||||
1281 | return false; | ||||
1282 | } | ||||
1283 | return true; | ||||
1284 | } | ||||
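// Illustrative examples of the i386 rules above:
//   struct S1 { int x; };           // 32 bits, all fields OK   -> register
//   struct S2 { char c[3]; };       // 24 bits, not register sized -> memory
//   struct S3 { float f; int i; };  // 64 bits, both fields OK  -> register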
1285 | |||||
1286 | static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { | ||||
1287 | // Treat complex types as the element type. | ||||
1288 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) | ||||
1289 | Ty = CTy->getElementType(); | ||||
1290 | |||||
1291 | // Check for a type which we know has a simple scalar argument-passing | ||||
1292 | // convention without any padding. (We're specifically looking for 32 | ||||
1293 | // and 64-bit integer and integer-equivalents, float, and double.) | ||||
1294 | if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && | ||||
1295 | !Ty->isEnumeralType() && !Ty->isBlockPointerType()) | ||||
1296 | return false; | ||||
1297 | |||||
1298 | uint64_t Size = Context.getTypeSize(Ty); | ||||
1299 | return Size == 32 || Size == 64; | ||||
1300 | } | ||||
1301 | |||||
1302 | static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, | ||||
1303 | uint64_t &Size) { | ||||
1304 | for (const auto *FD : RD->fields()) { | ||||
1305 | // Scalar arguments on the stack get 4 byte alignment on x86. If the | ||||
1306 | // argument is smaller than 32-bits, expanding the struct will create | ||||
1307 | // alignment padding. | ||||
1308 | if (!is32Or64BitBasicType(FD->getType(), Context)) | ||||
1309 | return false; | ||||
1310 | |||||
1311 | // FIXME: Reject bit-fields wholesale; there are two problems, we don't know | ||||
1312 | // how to expand them yet, and the predicate for telling if a bitfield still | ||||
1313 | // counts as "basic" is more complicated than what we were doing previously. | ||||
1314 | if (FD->isBitField()) | ||||
1315 | return false; | ||||
1316 | |||||
1317 | Size += Context.getTypeSize(FD->getType()); | ||||
1318 | } | ||||
1319 | return true; | ||||
1320 | } | ||||
1321 | |||||
1322 | static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, | ||||
1323 | uint64_t &Size) { | ||||
1324 | // Don't do this if there are any non-empty bases. | ||||
1325 | for (const CXXBaseSpecifier &Base : RD->bases()) { | ||||
1326 | if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(), | ||||
1327 | Size)) | ||||
1328 | return false; | ||||
1329 | } | ||||
1330 | if (!addFieldSizes(Context, RD, Size)) | ||||
1331 | return false; | ||||
1332 | return true; | ||||
1333 | } | ||||
1334 | |||||
1335 | /// Test whether an argument type which is to be passed indirectly (on the | ||||
1336 | /// stack) would have the equivalent layout if it was expanded into separate | ||||
1337 | /// arguments. If so, we prefer to do the latter to avoid inhibiting | ||||
1338 | /// optimizations. | ||||
1339 | bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const { | ||||
1340 | // We can only expand structure types. | ||||
1341 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
1342 | if (!RT) | ||||
1343 | return false; | ||||
1344 | const RecordDecl *RD = RT->getDecl(); | ||||
1345 | uint64_t Size = 0; | ||||
1346 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { | ||||
1347 | if (!IsWin32StructABI) { | ||||
1348 | // On non-Windows, we have to conservatively match our old bitcode | ||||
1349 | // prototypes in order to be ABI-compatible at the bitcode level. | ||||
1350 | if (!CXXRD->isCLike()) | ||||
1351 | return false; | ||||
1352 | } else { | ||||
1353 | // Don't do this for dynamic classes. | ||||
1354 | if (CXXRD->isDynamicClass()) | ||||
1355 | return false; | ||||
1356 | } | ||||
1357 | if (!addBaseAndFieldSizes(getContext(), CXXRD, Size)) | ||||
1358 | return false; | ||||
1359 | } else { | ||||
1360 | if (!addFieldSizes(getContext(), RD, Size)) | ||||
1361 | return false; | ||||
1362 | } | ||||
1363 | |||||
1364 | // We can do this if there was no alignment padding. | ||||
1365 | return Size == getContext().getTypeSize(Ty); | ||||
1366 | } | ||||
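// Illustrative examples: struct { int x; int y; } sums to 64 bits, matching
// the type size, so it can be expanded; struct { char c; int i; } cannot,
// because 'char' is not a 32/64-bit basic type; and any struct whose ABI
// layout inserts alignment padding fails the final size check above.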
1367 | |||||
1368 | ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { | ||||
1369 | // If the return value is indirect, then the hidden argument is consuming one | ||||
1370 | // integer register. | ||||
1371 | if (State.FreeRegs) { | ||||
1372 | --State.FreeRegs; | ||||
1373 | if (!IsMCUABI) | ||||
1374 | return getNaturalAlignIndirectInReg(RetTy); | ||||
1375 | } | ||||
1376 | return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); | ||||
1377 | } | ||||
1378 | |||||
1379 | ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, | ||||
1380 | CCState &State) const { | ||||
1381 | if (RetTy->isVoidType()) | ||||
1382 | return ABIArgInfo::getIgnore(); | ||||
1383 | |||||
1384 | const Type *Base = nullptr; | ||||
1385 | uint64_t NumElts = 0; | ||||
1386 | if ((State.CC == llvm::CallingConv::X86_VectorCall || | ||||
1387 | State.CC == llvm::CallingConv::X86_RegCall) && | ||||
1388 | isHomogeneousAggregate(RetTy, Base, NumElts)) { | ||||
1389 | // The LLVM struct type for such an aggregate should lower properly. | ||||
1390 | return ABIArgInfo::getDirect(); | ||||
1391 | } | ||||
1392 | |||||
1393 | if (const VectorType *VT = RetTy->getAs<VectorType>()) { | ||||
1394 | // On Darwin, some vectors are returned in registers. | ||||
1395 | if (IsDarwinVectorABI) { | ||||
1396 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
1397 | |||||
1398 | // 128-bit vectors are a special case; they are returned in | ||||
1399 | // registers and we need to make sure to pick a type the LLVM | ||||
1400 | // backend will like. | ||||
1401 | if (Size == 128) | ||||
1402 | return ABIArgInfo::getDirect(llvm::VectorType::get( | ||||
1403 | llvm::Type::getInt64Ty(getVMContext()), 2)); | ||||
1404 | |||||
1405 | // Always return in register if it fits in a general purpose | ||||
1406 | // register, or if it is 64 bits and has a single element. | ||||
1407 | if ((Size == 8 || Size == 16 || Size == 32) || | ||||
1408 | (Size == 64 && VT->getNumElements() == 1)) | ||||
1409 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), | ||||
1410 | Size)); | ||||
1411 | |||||
1412 | return getIndirectReturnResult(RetTy, State); | ||||
1413 | } | ||||
1414 | |||||
1415 | return ABIArgInfo::getDirect(); | ||||
1416 | } | ||||
1417 | |||||
1418 | if (isAggregateTypeForABI(RetTy)) { | ||||
1419 | if (const RecordType *RT = RetTy->getAs<RecordType>()) { | ||||
1420 | // Structures with flexible arrays are always indirect. | ||||
1421 | if (RT->getDecl()->hasFlexibleArrayMember()) | ||||
1422 | return getIndirectReturnResult(RetTy, State); | ||||
1423 | } | ||||
1424 | |||||
1425 | // Unless the ABI returns small structs in registers, aggregates other than complex types are always indirect. | ||||
1426 | if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) | ||||
1427 | return getIndirectReturnResult(RetTy, State); | ||||
1428 | |||||
1429 | // Ignore empty structs/unions. | ||||
1430 | if (isEmptyRecord(getContext(), RetTy, true)) | ||||
1431 | return ABIArgInfo::getIgnore(); | ||||
1432 | |||||
1433 | // Small structures which are register sized are generally returned | ||||
1434 | // in a register. | ||||
1435 | if (shouldReturnTypeInRegister(RetTy, getContext())) { | ||||
1436 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
1437 | |||||
1438 | // As a special-case, if the struct is a "single-element" struct, and | ||||
1439 | // the field is of type "float" or "double", return it in a | ||||
1440 | // floating-point register. (MSVC does not apply this special case.) | ||||
1441 | // We apply a similar transformation for pointer types to improve the | ||||
1442 | // quality of the generated IR. | ||||
1443 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) | ||||
1444 | if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) | ||||
1445 | || SeltTy->hasPointerRepresentation()) | ||||
1446 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); | ||||
1447 | |||||
1448 | // FIXME: We should be able to narrow this integer in cases with dead | ||||
1449 | // padding. | ||||
1450 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); | ||||
1451 | } | ||||
1452 | |||||
1453 | return getIndirectReturnResult(RetTy, State); | ||||
1454 | } | ||||
1455 | |||||
1456 | // Treat an enum type as its underlying type. | ||||
1457 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
1458 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
1459 | |||||
1460 | return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) | ||||
1461 | : ABIArgInfo::getDirect()); | ||||
1462 | } | ||||
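// Illustrative example: struct F { float f; } is a single-element struct, so
// on non-Windows targets the special case above returns it directly as
// 'float' (ending up in %st(0)) rather than coercing it to i32.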
1463 | |||||
1464 | static bool isSSEVectorType(ASTContext &Context, QualType Ty) { | ||||
1465 | return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; | ||||
1466 | } | ||||
1467 | |||||
1468 | static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { | ||||
1469 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
1470 | if (!RT) | ||||
1471 | return false; | ||||
1472 | const RecordDecl *RD = RT->getDecl(); | ||||
1473 | |||||
1474 | // If this is a C++ record, check the bases first. | ||||
1475 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) | ||||
1476 | for (const auto &I : CXXRD->bases()) | ||||
1477 | if (!isRecordWithSSEVectorType(Context, I.getType())) | ||||
1478 | return false; | ||||
1479 | |||||
1480 | for (const auto *i : RD->fields()) { | ||||
1481 | QualType FT = i->getType(); | ||||
1482 | |||||
1483 | if (isSSEVectorType(Context, FT)) | ||||
1484 | return true; | ||||
1485 | |||||
1486 | if (isRecordWithSSEVectorType(Context, FT)) | ||||
1487 | return true; | ||||
1488 | } | ||||
1489 | |||||
1490 | return false; | ||||
1491 | } | ||||
1492 | |||||
1493 | unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, | ||||
1494 | unsigned Align) const { | ||||
1495 | // If the alignment is less than or equal to the minimum ABI | ||||
1496 | // alignment, just use the default; the backend will handle this. | ||||
1497 | if (Align <= MinABIStackAlignInBytes) | ||||
1498 | return 0; // Use default alignment. | ||||
1499 | |||||
1500 | // On non-Darwin, the stack type alignment is always 4. | ||||
1501 | if (!IsDarwinVectorABI) { | ||||
1502 | // Set explicit alignment, since we may need to realign the top. | ||||
1503 | return MinABIStackAlignInBytes; | ||||
1504 | } | ||||
1505 | |||||
1506 | // Otherwise, if the type contains an SSE vector type, the alignment is 16. | ||||
1507 | if (Align >= 16 && (isSSEVectorType(getContext(), Ty) || | ||||
1508 | isRecordWithSSEVectorType(getContext(), Ty))) | ||||
1509 | return 16; | ||||
1510 | |||||
1511 | return MinABIStackAlignInBytes; | ||||
1512 | } | ||||
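// Illustrative example: on Darwin, a 16-byte-aligned struct containing an
// __m128 field satisfies the SSE-vector check above and keeps 16-byte stack
// alignment; on other i386 targets the same struct is given the 4-byte
// minimum.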
1513 | |||||
1514 | ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, | ||||
1515 | CCState &State) const { | ||||
1516 | if (!ByVal) { | ||||
1517 | if (State.FreeRegs) { | ||||
1518 | --State.FreeRegs; // Non-byval indirects just use one pointer. | ||||
1519 | if (!IsMCUABI) | ||||
1520 | return getNaturalAlignIndirectInReg(Ty); | ||||
1521 | } | ||||
1522 | return getNaturalAlignIndirect(Ty, false); | ||||
1523 | } | ||||
1524 | |||||
1525 | // Compute the byval alignment. | ||||
1526 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; | ||||
1527 | unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); | ||||
1528 | if (StackAlign == 0) | ||||
1529 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); | ||||
1530 | |||||
1531 | // If the stack alignment is less than the type alignment, realign the | ||||
1532 | // argument. | ||||
1533 | bool Realign = TypeAlign > StackAlign; | ||||
1534 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), | ||||
1535 | /*ByVal=*/true, Realign); | ||||
1536 | } | ||||
1537 | |||||
1538 | X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { | ||||
1539 | const Type *T = isSingleElementStruct(Ty, getContext()); | ||||
1540 | if (!T) | ||||
1541 | T = Ty.getTypePtr(); | ||||
1542 | |||||
1543 | if (const BuiltinType *BT = T->getAs<BuiltinType>()) { | ||||
1544 | BuiltinType::Kind K = BT->getKind(); | ||||
1545 | if (K == BuiltinType::Float || K == BuiltinType::Double) | ||||
1546 | return Float; | ||||
1547 | } | ||||
1548 | return Integer; | ||||
1549 | } | ||||
1550 | |||||
1551 | bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { | ||||
1552 | if (!IsSoftFloatABI) { | ||||
1553 | Class C = classify(Ty); | ||||
1554 | if (C == Float) | ||||
1555 | return false; | ||||
1556 | } | ||||
1557 | |||||
1558 | unsigned Size = getContext().getTypeSize(Ty); | ||||
1559 | unsigned SizeInRegs = (Size + 31) / 32; | ||||
1560 | |||||
1561 | if (SizeInRegs == 0) | ||||
1562 | return false; | ||||
1563 | |||||
1564 | if (!IsMCUABI) { | ||||
1565 | if (SizeInRegs > State.FreeRegs) { | ||||
1566 | State.FreeRegs = 0; | ||||
1567 | return false; | ||||
1568 | } | ||||
1569 | } else { | ||||
1570 | // The MCU psABI allows passing parameters in-reg even if there are | ||||
1571 | // earlier parameters that are passed on the stack. Also, | ||||
1572 | // it does not allow passing >8-byte structs in-register, | ||||
1573 | // even if there are 3 free registers available. | ||||
1574 | if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) | ||||
1575 | return false; | ||||
1576 | } | ||||
1577 | |||||
1578 | State.FreeRegs -= SizeInRegs; | ||||
1579 | return true; | ||||
1580 | } | ||||
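// Illustrative example: with -mregparm=3 (FreeRegs == 3), a 'long long'
// argument has SizeInRegs == 2 and consumes two registers, leaving one; a
// 12-byte struct (SizeInRegs == 3) that then arrives finds
// SizeInRegs > FreeRegs, so FreeRegs is zeroed and it goes to memory.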
1581 | |||||
1582 | bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, | ||||
1583 | bool &InReg, | ||||
1584 | bool &NeedsPadding) const { | ||||
1585 | // On Windows, aggregates other than HFAs are never passed in registers, and | ||||
1586 | // they do not consume register slots. Homogeneous floating-point aggregates | ||||
1587 | // (HFAs) have already been dealt with at this point. | ||||
1588 | if (IsWin32StructABI && isAggregateTypeForABI(Ty)) | ||||
1589 | return false; | ||||
1590 | |||||
1591 | NeedsPadding = false; | ||||
1592 | InReg = !IsMCUABI; | ||||
1593 | |||||
1594 | if (!updateFreeRegs(Ty, State)) | ||||
1595 | return false; | ||||
1596 | |||||
1597 | if (IsMCUABI) | ||||
1598 | return true; | ||||
1599 | |||||
1600 | if (State.CC == llvm::CallingConv::X86_FastCall || | ||||
1601 | State.CC == llvm::CallingConv::X86_VectorCall || | ||||
1602 | State.CC == llvm::CallingConv::X86_RegCall) { | ||||
1603 | if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) | ||||
1604 | NeedsPadding = true; | ||||
1605 | |||||
1606 | return false; | ||||
1607 | } | ||||
1608 | |||||
1609 | return true; | ||||
1610 | } | ||||
1611 | |||||
1612 | bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { | ||||
1613 | if (!updateFreeRegs(Ty, State)) | ||||
1614 | return false; | ||||
1615 | |||||
1616 | if (IsMCUABI) | ||||
1617 | return false; | ||||
1618 | |||||
1619 | if (State.CC == llvm::CallingConv::X86_FastCall || | ||||
1620 | State.CC == llvm::CallingConv::X86_VectorCall || | ||||
1621 | State.CC == llvm::CallingConv::X86_RegCall) { | ||||
1622 | if (getContext().getTypeSize(Ty) > 32) | ||||
1623 | return false; | ||||
1624 | |||||
1625 | return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || | ||||
1626 | Ty->isReferenceType()); | ||||
1627 | } | ||||
1628 | |||||
1629 | return true; | ||||
1630 | } | ||||
1631 | |||||
1632 | ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, | ||||
1633 | CCState &State) const { | ||||
1634 | // FIXME: Set alignment on indirect arguments. | ||||
1635 | |||||
1636 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
1637 | |||||
1638 | // Check with the C++ ABI first. | ||||
1639 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
1640 | if (RT) { | ||||
1641 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); | ||||
1642 | if (RAA == CGCXXABI::RAA_Indirect) { | ||||
1643 | return getIndirectResult(Ty, false, State); | ||||
1644 | } else if (RAA == CGCXXABI::RAA_DirectInMemory) { | ||||
1645 | // The field index doesn't matter, we'll fix it up later. | ||||
1646 | return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); | ||||
1647 | } | ||||
1648 | } | ||||
1649 | |||||
1650 | // Regcall uses the concept of a homogeneous vector aggregate, similar | ||||
1651 | // to other targets. | ||||
1652 | const Type *Base = nullptr; | ||||
1653 | uint64_t NumElts = 0; | ||||
1654 | if (State.CC == llvm::CallingConv::X86_RegCall && | ||||
1655 | isHomogeneousAggregate(Ty, Base, NumElts)) { | ||||
1656 | |||||
1657 | if (State.FreeSSERegs >= NumElts) { | ||||
1658 | State.FreeSSERegs -= NumElts; | ||||
1659 | if (Ty->isBuiltinType() || Ty->isVectorType()) | ||||
1660 | return ABIArgInfo::getDirect(); | ||||
1661 | return ABIArgInfo::getExpand(); | ||||
1662 | } | ||||
1663 | return getIndirectResult(Ty, /*ByVal=*/false, State); | ||||
1664 | } | ||||
1665 | |||||
1666 | if (isAggregateTypeForABI(Ty)) { | ||||
1667 | // Structures with flexible arrays are always indirect. | ||||
1668 | // FIXME: This should not be byval! | ||||
1669 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) | ||||
1670 | return getIndirectResult(Ty, true, State); | ||||
1671 | |||||
1672 | // Ignore empty structs/unions on non-Windows. | ||||
1673 | if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) | ||||
1674 | return ABIArgInfo::getIgnore(); | ||||
1675 | |||||
1676 | llvm::LLVMContext &LLVMContext = getVMContext(); | ||||
1677 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); | ||||
1678 | bool NeedsPadding = false; | ||||
1679 | bool InReg; | ||||
1680 | if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { | ||||
1681 | unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; | ||||
1682 | SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); | ||||
1683 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); | ||||
1684 | if (InReg) | ||||
1685 | return ABIArgInfo::getDirectInReg(Result); | ||||
1686 | else | ||||
1687 | return ABIArgInfo::getDirect(Result); | ||||
1688 | } | ||||
1689 | llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; | ||||
1690 | |||||
1691 | // Expand small (<= 128-bit) record types when we know that the stack layout | ||||
1692 | // of those arguments will match the struct. This is important because the | ||||
1693 | // LLVM backend isn't smart enough to remove byval, which inhibits many | ||||
1694 | // optimizations. | ||||
1695 | // Don't do this for the MCU if there are still free integer registers | ||||
1696 | // (see X86_64 ABI for full explanation). | ||||
1697 | if (getContext().getTypeSize(Ty) <= 4 * 32 && | ||||
1698 | (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty)) | ||||
1699 | return ABIArgInfo::getExpandWithPadding( | ||||
1700 | State.CC == llvm::CallingConv::X86_FastCall || | ||||
1701 | State.CC == llvm::CallingConv::X86_VectorCall || | ||||
1702 | State.CC == llvm::CallingConv::X86_RegCall, | ||||
1703 | PaddingType); | ||||
1704 | |||||
1705 | return getIndirectResult(Ty, true, State); | ||||
1706 | } | ||||
1707 | |||||
1708 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
1709 | // On Darwin, some vectors are passed in memory; we handle this by passing | ||||
1710 | // them as an i8/i16/i32/i64. | ||||
1711 | if (IsDarwinVectorABI) { | ||||
1712 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
1713 | if ((Size == 8 || Size == 16 || Size == 32) || | ||||
1714 | (Size == 64 && VT->getNumElements() == 1)) | ||||
1715 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), | ||||
1716 | Size)); | ||||
1717 | } | ||||
1718 | |||||
1719 | if (IsX86_MMXType(CGT.ConvertType(Ty))) | ||||
1720 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); | ||||
1721 | |||||
1722 | return ABIArgInfo::getDirect(); | ||||
1723 | } | ||||
1724 | |||||
1725 | |||||
1726 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
1727 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
1728 | |||||
1729 | bool InReg = shouldPrimitiveUseInReg(Ty, State); | ||||
1730 | |||||
1731 | if (Ty->isPromotableIntegerType()) { | ||||
1732 | if (InReg) | ||||
1733 | return ABIArgInfo::getExtendInReg(Ty); | ||||
1734 | return ABIArgInfo::getExtend(Ty); | ||||
1735 | } | ||||
1736 | |||||
1737 | if (InReg) | ||||
1738 | return ABIArgInfo::getDirectInReg(); | ||||
1739 | return ABIArgInfo::getDirect(); | ||||
1740 | } | ||||
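// Illustrative example: once registers run out, struct { int x; int y; } is
// expanded into two i32 stack arguments rather than passed byval, because
// its stack layout matches the struct layout; under fastcall, vectorcall,
// or regcall, a small (<= 32-bit) aggregate may additionally carry an i32
// inreg padding slot so a leftover register is not silently skipped.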
1741 | |||||
1742 | void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, | ||||
1743 | bool &UsedInAlloca) const { | ||||
1744 | // Vectorcall on x86 works subtly differently than it does on x64, so the | ||||
1745 | // format here is a bit different from the x64 version. First, all vector | ||||
1746 | // types (not HVAs) are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers. | ||||
1747 | // This differs from the x64 implementation, where the first 6 parameters by | ||||
1748 | // index get registers. | ||||
1749 | // After that, integers AND HVAs are assigned left to right in the same pass. | ||||
1750 | // Integers are passed as ECX/EDX if one is available (in order). HVAs will | ||||
1751 | // first take up the remaining YMM/XMM registers. If insufficient registers | ||||
1752 | // remain but an integer register (ECX/EDX) is available, it will be passed | ||||
1753 | // in that, else, on the stack. | ||||
1754 | for (auto &I : FI.arguments()) { | ||||
1755 | // First pass do all the vector types. | ||||
1756 | const Type *Base = nullptr; | ||||
1757 | uint64_t NumElts = 0; | ||||
1758 | const QualType& Ty = I.type; | ||||
1759 | if ((Ty->isVectorType() || Ty->isBuiltinType()) && | ||||
1760 | isHomogeneousAggregate(Ty, Base, NumElts)) { | ||||
1761 | if (State.FreeSSERegs >= NumElts) { | ||||
1762 | State.FreeSSERegs -= NumElts; | ||||
1763 | I.info = ABIArgInfo::getDirect(); | ||||
1764 | } else { | ||||
1765 | I.info = classifyArgumentType(Ty, State); | ||||
1766 | } | ||||
1767 | UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); | ||||
1768 | } | ||||
1769 | } | ||||
1770 | |||||
1771 | for (auto &I : FI.arguments()) { | ||||
1772 | // Second pass, do the rest! | ||||
1773 | const Type *Base = nullptr; | ||||
1774 | uint64_t NumElts = 0; | ||||
1775 | const QualType& Ty = I.type; | ||||
1776 | bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts); | ||||
1777 | |||||
1778 | if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) { | ||||
1779 | // Assign true HVAs (aggregates, as opposed to plain vector/native FP types). | ||||
1780 | if (State.FreeSSERegs >= NumElts) { | ||||
1781 | State.FreeSSERegs -= NumElts; | ||||
1782 | I.info = getDirectX86Hva(); | ||||
1783 | } else { | ||||
1784 | I.info = getIndirectResult(Ty, /*ByVal=*/false, State); | ||||
1785 | } | ||||
1786 | } else if (!IsHva) { | ||||
1787 | // Assign all Non-HVAs, so this will exclude Vector/FP args. | ||||
1788 | I.info = classifyArgumentType(Ty, State); | ||||
1789 | UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); | ||||
1790 | } | ||||
1791 | } | ||||
1792 | } | ||||
1793 | |||||
1794 | void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
1795 | CCState State(FI.getCallingConvention()); | ||||
1796 | if (IsMCUABI) | ||||
1797 | State.FreeRegs = 3; | ||||
1798 | else if (State.CC == llvm::CallingConv::X86_FastCall) | ||||
1799 | State.FreeRegs = 2; | ||||
1800 | else if (State.CC == llvm::CallingConv::X86_VectorCall) { | ||||
1801 | State.FreeRegs = 2; | ||||
1802 | State.FreeSSERegs = 6; | ||||
1803 | } else if (FI.getHasRegParm()) | ||||
1804 | State.FreeRegs = FI.getRegParm(); | ||||
1805 | else if (State.CC == llvm::CallingConv::X86_RegCall) { | ||||
1806 | State.FreeRegs = 5; | ||||
1807 | State.FreeSSERegs = 8; | ||||
1808 | } else | ||||
1809 | State.FreeRegs = DefaultNumRegisterParameters; | ||||
1810 | |||||
1811 | if (!::classifyReturnType(getCXXABI(), FI, *this)) { | ||||
1812 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); | ||||
1813 | } else if (FI.getReturnInfo().isIndirect()) { | ||||
1814 | // The C++ ABI is not aware of register usage, so we have to check if the | ||||
1815 | // return value was sret and put it in a register ourselves if appropriate. | ||||
1816 | if (State.FreeRegs) { | ||||
1817 | --State.FreeRegs; // The sret parameter consumes a register. | ||||
1818 | if (!IsMCUABI) | ||||
1819 | FI.getReturnInfo().setInReg(true); | ||||
1820 | } | ||||
1821 | } | ||||
1822 | |||||
1823 | // The chain argument effectively gives us another free register. | ||||
1824 | if (FI.isChainCall()) | ||||
1825 | ++State.FreeRegs; | ||||
1826 | |||||
1827 | bool UsedInAlloca = false; | ||||
1828 | if (State.CC == llvm::CallingConv::X86_VectorCall) { | ||||
1829 | computeVectorCallArgs(FI, State, UsedInAlloca); | ||||
1830 | } else { | ||||
1831 | // If not vectorcall, revert to normal behavior. | ||||
1832 | for (auto &I : FI.arguments()) { | ||||
1833 | I.info = classifyArgumentType(I.type, State); | ||||
1834 | UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); | ||||
1835 | } | ||||
1836 | } | ||||
1837 | |||||
1838 | // If we needed to use inalloca for any argument, do a second pass and rewrite | ||||
1839 | // all the memory arguments to use inalloca. | ||||
1840 | if (UsedInAlloca) | ||||
1841 | rewriteWithInAlloca(FI); | ||||
1842 | } | ||||
1843 | |||||
1844 | void | ||||
1845 | X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, | ||||
1846 | CharUnits &StackOffset, ABIArgInfo &Info, | ||||
1847 | QualType Type) const { | ||||
1848 | // Arguments are always 4-byte-aligned. | ||||
1849 | CharUnits FieldAlign = CharUnits::fromQuantity(4); | ||||
1850 | |||||
1851 | assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct"); | ||||
1852 | Info = ABIArgInfo::getInAlloca(FrameFields.size()); | ||||
1853 | FrameFields.push_back(CGT.ConvertTypeForMem(Type)); | ||||
1854 | StackOffset += getContext().getTypeSizeInChars(Type); | ||||
1855 | |||||
1856 | // Insert padding bytes to respect alignment. | ||||
1857 | CharUnits FieldEnd = StackOffset; | ||||
1858 | StackOffset = FieldEnd.alignTo(FieldAlign); | ||||
1859 | if (StackOffset != FieldEnd) { | ||||
1860 | CharUnits NumBytes = StackOffset - FieldEnd; | ||||
1861 | llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); | ||||
1862 | Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); | ||||
1863 | FrameFields.push_back(Ty); | ||||
1864 | } | ||||
1865 | } | ||||
1866 | |||||
1867 | static bool isArgInAlloca(const ABIArgInfo &Info) { | ||||
1868 | // Leave ignored and inreg arguments alone. | ||||
1869 | switch (Info.getKind()) { | ||||
1870 | case ABIArgInfo::InAlloca: | ||||
1871 | return true; | ||||
1872 | case ABIArgInfo::Indirect: | ||||
1873 | assert(Info.getIndirectByVal()); | ||||
1874 | return true; | ||||
1875 | case ABIArgInfo::Ignore: | ||||
1876 | return false; | ||||
1877 | case ABIArgInfo::Direct: | ||||
1878 | case ABIArgInfo::Extend: | ||||
1879 | if (Info.getInReg()) | ||||
1880 | return false; | ||||
1881 | return true; | ||||
1882 | case ABIArgInfo::Expand: | ||||
1883 | case ABIArgInfo::CoerceAndExpand: | ||||
1884 | // These are aggregate types which are never passed in registers when | ||||
1885 | // inalloca is involved. | ||||
1886 | return true; | ||||
1887 | } | ||||
1888 | llvm_unreachable("invalid enum")::llvm::llvm_unreachable_internal("invalid enum", "/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/TargetInfo.cpp" , 1888); | ||||
1889 | } | ||||
1890 | |||||
1891 | void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { | ||||
1892 | assert(IsWin32StructABI && "inalloca only supported on win32"); | ||||
1893 | |||||
1894 | // Build a packed struct type for all of the arguments in memory. | ||||
1895 | SmallVector<llvm::Type *, 6> FrameFields; | ||||
1896 | |||||
1897 | // The stack alignment is always 4. | ||||
1898 | CharUnits StackAlign = CharUnits::fromQuantity(4); | ||||
1899 | |||||
1900 | CharUnits StackOffset; | ||||
1901 | CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); | ||||
1902 | |||||
1903 | // Put 'this' into the struct before 'sret', if necessary. | ||||
1904 | bool IsThisCall = | ||||
1905 | FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; | ||||
1906 | ABIArgInfo &Ret = FI.getReturnInfo(); | ||||
1907 | if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && | ||||
1908 | isArgInAlloca(I->info)) { | ||||
1909 | addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); | ||||
1910 | ++I; | ||||
1911 | } | ||||
1912 | |||||
1913 | // Put the sret parameter into the inalloca struct if it's in memory. | ||||
1914 | if (Ret.isIndirect() && !Ret.getInReg()) { | ||||
1915 | CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); | ||||
1916 | addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); | ||||
1917 | // On Windows, the hidden sret parameter is always returned in eax. | ||||
1918 | Ret.setInAllocaSRet(IsWin32StructABI); | ||||
1919 | } | ||||
1920 | |||||
1921 | // Skip the 'this' parameter in ecx. | ||||
1922 | if (IsThisCall) | ||||
1923 | ++I; | ||||
1924 | |||||
1925 | // Put arguments passed in memory into the struct. | ||||
1926 | for (; I != E; ++I) { | ||||
1927 | if (isArgInAlloca(I->info)) | ||||
1928 | addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); | ||||
1929 | } | ||||
1930 | |||||
1931 | FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, | ||||
1932 | /*isPacked=*/true), | ||||
1933 | StackAlign); | ||||
1934 | } | ||||
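// Illustrative example (hypothetical signature): for 'S f(int a, S b)' on
// Win32, where S has a non-trivial copy constructor and is returned
// indirectly, the rewritten frame is roughly the packed struct
// { S* sret, i32 a, S b } with 4-byte alignment, and the sret pointer is
// additionally returned in EAX.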
1935 | |||||
1936 | Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, | ||||
1937 | Address VAListAddr, QualType Ty) const { | ||||
1938 | |||||
1939 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); | ||||
1940 | |||||
1941 | // x86-32 changes the alignment of certain arguments on the stack. | ||||
1942 | // | ||||
1943 | // Just messing with TypeInfo like this works because we never pass | ||||
1944 | // anything indirectly. | ||||
1945 | TypeInfo.second = CharUnits::fromQuantity( | ||||
1946 | getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); | ||||
1947 | |||||
1948 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, | ||||
1949 | TypeInfo, CharUnits::fromQuantity(4), | ||||
1950 | /*AllowHigherAlign*/ true); | ||||
1951 | } | ||||
1952 | |||||
1953 | bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( | ||||
1954 | const llvm::Triple &Triple, const CodeGenOptions &Opts) { | ||||
1955 | assert(Triple.getArch() == llvm::Triple::x86); | ||||
1956 | |||||
1957 | switch (Opts.getStructReturnConvention()) { | ||||
1958 | case CodeGenOptions::SRCK_Default: | ||||
1959 | break; | ||||
1960 | case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return | ||||
1961 | return false; | ||||
1962 | case CodeGenOptions::SRCK_InRegs: // -freg-struct-return | ||||
1963 | return true; | ||||
1964 | } | ||||
1965 | |||||
1966 | if (Triple.isOSDarwin() || Triple.isOSIAMCU()) | ||||
1967 | return true; | ||||
1968 | |||||
1969 | switch (Triple.getOS()) { | ||||
1970 | case llvm::Triple::DragonFly: | ||||
1971 | case llvm::Triple::FreeBSD: | ||||
1972 | case llvm::Triple::OpenBSD: | ||||
1973 | case llvm::Triple::Win32: | ||||
1974 | return true; | ||||
1975 | default: | ||||
1976 | return false; | ||||
1977 | } | ||||
1978 | } | ||||
1979 | |||||
1980 | void X86_32TargetCodeGenInfo::setTargetAttributes( | ||||
1981 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { | ||||
1982 | if (GV->isDeclaration()) | ||||
1983 | return; | ||||
1984 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { | ||||
1985 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { | ||||
1986 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
1987 | Fn->addFnAttr("stackrealign"); | ||||
1988 | } | ||||
1989 | if (FD->hasAttr<AnyX86InterruptAttr>()) { | ||||
1990 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
1991 | Fn->setCallingConv(llvm::CallingConv::X86_INTR); | ||||
1992 | } | ||||
1993 | } | ||||
1994 | } | ||||
1995 | |||||
1996 | bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( | ||||
1997 | CodeGen::CodeGenFunction &CGF, | ||||
1998 | llvm::Value *Address) const { | ||||
1999 | CodeGen::CGBuilderTy &Builder = CGF.Builder; | ||||
2000 | |||||
2001 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); | ||||
2002 | |||||
2003 | // 0-7 are the eight integer registers; the order is different | ||||
2004 | // on Darwin (for EH), but the range is the same. | ||||
2005 | // 8 is %eip. | ||||
2006 | AssignToArrayRange(Builder, Address, Four8, 0, 8); | ||||
2007 | |||||
2008 | if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { | ||||
2009 | // 12-16 are st(0..4). Not sure why we stop at 4. | ||||
2010 | // These have size 16, which is sizeof(long double) on | ||||
2011 | // platforms with 8-byte alignment for that type. | ||||
2012 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); | ||||
2013 | AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); | ||||
2014 | |||||
2015 | } else { | ||||
2016 | // 9 is %eflags, which doesn't get a size on Darwin for some | ||||
2017 | // reason. | ||||
2018 | Builder.CreateAlignedStore( | ||||
2019 | Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), | ||||
2020 | CharUnits::One()); | ||||
2021 | |||||
2022 | // 11-16 are st(0..5). Not sure why we stop at 5. | ||||
2023 | // These have size 12, which is sizeof(long double) on | ||||
2024 | // platforms with 4-byte alignment for that type. | ||||
2025 | llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); | ||||
2026 | AssignToArrayRange(Builder, Address, Twelve8, 11, 16); | ||||
2027 | } | ||||
2028 | |||||
2029 | return false; | ||||
2030 | } | ||||
2031 | |||||
2032 | //===----------------------------------------------------------------------===// | ||||
2033 | // X86-64 ABI Implementation | ||||
2034 | //===----------------------------------------------------------------------===// | ||||
2035 | |||||
2036 | |||||
2037 | namespace { | ||||
2038 | /// The AVX ABI level for X86 targets. | ||||
2039 | enum class X86AVXABILevel { | ||||
2040 | None, | ||||
2041 | AVX, | ||||
2042 | AVX512 | ||||
2043 | }; | ||||
2044 | |||||
2045 | /// \returns the size in bits of the largest (native) vector for \p AVXLevel. | ||||
2046 | static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { | ||||
2047 | switch (AVXLevel) { | ||||
2048 | case X86AVXABILevel::AVX512: | ||||
2049 | return 512; | ||||
2050 | case X86AVXABILevel::AVX: | ||||
2051 | return 256; | ||||
2052 | case X86AVXABILevel::None: | ||||
2053 | return 128; | ||||
2054 | } | ||||
2055 | llvm_unreachable("Unknown AVXLevel")::llvm::llvm_unreachable_internal("Unknown AVXLevel", "/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/TargetInfo.cpp" , 2055); | ||||
2056 | } | ||||
2057 | |||||
2058 | /// X86_64ABIInfo - The X86_64 ABI information. | ||||
2059 | class X86_64ABIInfo : public SwiftABIInfo { | ||||
2060 | enum Class { | ||||
2061 | Integer = 0, | ||||
2062 | SSE, | ||||
2063 | SSEUp, | ||||
2064 | X87, | ||||
2065 | X87Up, | ||||
2066 | ComplexX87, | ||||
2067 | NoClass, | ||||
2068 | Memory | ||||
2069 | }; | ||||
2070 | |||||
2071 | /// merge - Implement the X86_64 ABI merging algorithm. | ||||
2072 | /// | ||||
2073 | /// Merge an accumulating classification \arg Accum with a field | ||||
2074 | /// classification \arg Field. | ||||
2075 | /// | ||||
2076 | /// \param Accum - The accumulating classification. This should | ||||
2077 | /// always be either NoClass or the result of a previous merge | ||||
2078 | /// call. In addition, this should never be Memory (the caller | ||||
2079 | /// should just return Memory for the aggregate). | ||||
2080 | static Class merge(Class Accum, Class Field); | ||||
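// Illustrative merges from the algorithm above: merge(NoClass, X) == X,
// merge(Integer, SSE) == Integer (INTEGER takes precedence over SSE), and
// merging anything with Memory yields Memory.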
2081 | |||||
2082 | /// postMerge - Implement the X86_64 ABI post merging algorithm. | ||||
2083 | /// | ||||
2084 | /// Post merger cleanup, reduces a malformed Hi and Lo pair to | ||||
2085 | /// final MEMORY or SSE classes when necessary. | ||||
2086 | /// | ||||
2087 | /// \param AggregateSize - The size of the current aggregate in | ||||
2088 | /// the classification process. | ||||
2089 | /// | ||||
2090 | /// \param Lo - The classification for the parts of the type | ||||
2091 | /// residing in the low word of the containing object. | ||||
2092 | /// | ||||
2093 | /// \param Hi - The classification for the parts of the type | ||||
2094 | /// residing in the higher words of the containing object. | ||||
2095 | /// | ||||
2096 | void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; | ||||
2097 | |||||
2098 | /// classify - Determine the x86_64 register classes in which the | ||||
2099 | /// given type T should be passed. | ||||
2100 | /// | ||||
2101 | /// \param Lo - The classification for the parts of the type | ||||
2102 | /// residing in the low word of the containing object. | ||||
2103 | /// | ||||
2104 | /// \param Hi - The classification for the parts of the type | ||||
2105 | /// residing in the high word of the containing object. | ||||
2106 | /// | ||||
2107 | /// \param OffsetBase - The bit offset of this type in the | ||||
2108 | /// containing object. Some parameters are classified differently | ||||
2109 | /// depending on whether they straddle an eightbyte boundary. | ||||
2110 | /// | ||||
2111 | /// \param isNamedArg - Whether the argument in question is a "named" | ||||
2112 | /// argument, as used in AMD64-ABI 3.5.7. | ||||
2113 | /// | ||||
2114 | /// If a word is unused its result will be NoClass; if a type should | ||||
2115 | /// be passed in Memory then at least the classification of \arg Lo | ||||
2116 | /// will be Memory. | ||||
2117 | /// | ||||
2118 | /// The \arg Lo class will be NoClass iff the argument is ignored. | ||||
2119 | /// | ||||
2120 | /// If the \arg Lo class is ComplexX87, then the \arg Hi class will | ||||
2121 | /// also be ComplexX87. | ||||
2122 | void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, | ||||
2123 | bool isNamedArg) const; | ||||
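// Illustrative classification (AMD64 SysV): for struct { double d; int i; },
// bytes 0-7 classify as SSE and bytes 8-11 as INTEGER, so Lo == SSE and
// Hi == INTEGER, and the struct travels in one XMM register and one GPR.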
2124 | |||||
2125 | llvm::Type *GetByteVectorType(QualType Ty) const; | ||||
2126 | llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, | ||||
2127 | unsigned IROffset, QualType SourceTy, | ||||
2128 | unsigned SourceOffset) const; | ||||
2129 | llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, | ||||
2130 | unsigned IROffset, QualType SourceTy, | ||||
2131 | unsigned SourceOffset) const; | ||||
2132 | |||||
2133 | /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable | ||||
2134 | /// result such that the argument will be returned in memory. | ||||
2135 | ABIArgInfo getIndirectReturnResult(QualType Ty) const; | ||||
2136 | |||||
2137 | /// getIndirectResult - Given a source type \arg Ty, return a suitable result | ||||
2138 | /// such that the argument will be passed in memory. | ||||
2139 | /// | ||||
2140 | /// \param freeIntRegs - The number of free integer registers remaining | ||||
2141 | /// available. | ||||
2142 | ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; | ||||
2143 | |||||
2144 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
2145 | |||||
2146 | ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs, | ||||
2147 | unsigned &neededInt, unsigned &neededSSE, | ||||
2148 | bool isNamedArg) const; | ||||
2149 | |||||
2150 | ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt, | ||||
2151 | unsigned &NeededSSE) const; | ||||
2152 | |||||
2153 | ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, | ||||
2154 | unsigned &NeededSSE) const; | ||||
2155 | |||||
2156 | bool IsIllegalVectorType(QualType Ty) const; | ||||
2157 | |||||
2158 | /// The 0.98 ABI revision clarified a lot of ambiguities, | ||||
2159 | /// unfortunately in ways that were not always consistent with | ||||
2160 | /// certain previous compilers. In particular, platforms which | ||||
2161 | /// required strict binary compatibility with older versions of GCC | ||||
2162 | /// may need to exempt themselves. | ||||
2163 | bool honorsRevision0_98() const { | ||||
2164 | return !getTarget().getTriple().isOSDarwin(); | ||||
2165 | } | ||||
2166 | |||||
2167 | /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to | ||||
2168 | /// classify it as INTEGER (for compatibility with older clang compilers). | ||||
2169 | bool classifyIntegerMMXAsSSE() const { | ||||
2170 | // Clang <= 3.8 did not do this. | ||||
2171 | if (getContext().getLangOpts().getClangABICompat() <= | ||||
2172 | LangOptions::ClangABI::Ver3_8) | ||||
2173 | return false; | ||||
2174 | |||||
2175 | const llvm::Triple &Triple = getTarget().getTriple(); | ||||
2176 | if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) | ||||
2177 | return false; | ||||
2178 | if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) | ||||
2179 | return false; | ||||
2180 | return true; | ||||
2181 | } | ||||
2182 | |||||
2183 | // GCC classifies vectors of __int128 as memory. | ||||
2184 | bool passInt128VectorsInMem() const { | ||||
2185 | // Clang <= 9.0 did not do this. | ||||
2186 | if (getContext().getLangOpts().getClangABICompat() <= | ||||
2187 | LangOptions::ClangABI::Ver9) | ||||
2188 | return false; | ||||
2189 | |||||
2190 | const llvm::Triple &T = getTarget().getTriple(); | ||||
2191 | return T.isOSLinux() || T.isOSNetBSD(); | ||||
2192 | } | ||||
2193 | |||||
2194 | X86AVXABILevel AVXLevel; | ||||
2195 | // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on | ||||
2196 | // 64-bit hardware. | ||||
2197 | bool Has64BitPointers; | ||||
2198 | |||||
2199 | public: | ||||
2200 | X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : | ||||
2201 | SwiftABIInfo(CGT), AVXLevel(AVXLevel), | ||||
2202 | Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { | ||||
2203 | } | ||||
2204 | |||||
2205 | bool isPassedUsingAVXType(QualType type) const { | ||||
2206 | unsigned neededInt, neededSSE; | ||||
2207 | // The freeIntRegs argument doesn't matter here. | ||||
2208 | ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, | ||||
2209 | /*isNamedArg*/true); | ||||
2210 | if (info.isDirect()) { | ||||
2211 | llvm::Type *ty = info.getCoerceToType(); | ||||
2212 | if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) | ||||
2213 | return (vectorTy->getBitWidth() > 128); | ||||
2214 | } | ||||
2215 | return false; | ||||
2216 | } | ||||
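// Illustrative example (assuming an AVX ABI level that permits 256-bit
// vectors): a __m256 argument lowers to a 256-bit vector passed directly,
// so the width test above returns true; __m128 stays at 128 bits and
// returns false.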
2217 | |||||
2218 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
2219 | |||||
2220 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
2221 | QualType Ty) const override; | ||||
2222 | Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
2223 | QualType Ty) const override; | ||||
2224 | |||||
2225 | bool has64BitPointers() const { | ||||
2226 | return Has64BitPointers; | ||||
2227 | } | ||||
2228 | |||||
2229 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
2230 | bool asReturnValue) const override { | ||||
2231 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
2232 | } | ||||
2233 | bool isSwiftErrorInRegister() const override { | ||||
2234 | return true; | ||||
2235 | } | ||||
2236 | }; | ||||
2237 | |||||
2238 | /// WinX86_64ABIInfo - The Windows X86_64 ABI information. | ||||
2239 | class WinX86_64ABIInfo : public SwiftABIInfo { | ||||
2240 | public: | ||||
2241 | WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) | ||||
2242 | : SwiftABIInfo(CGT), AVXLevel(AVXLevel), | ||||
2243 | IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} | ||||
2244 | |||||
2245 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
2246 | |||||
2247 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
2248 | QualType Ty) const override; | ||||
2249 | |||||
2250 | bool isHomogeneousAggregateBaseType(QualType Ty) const override { | ||||
2251 | // FIXME: Assumes vectorcall is in use. | ||||
2252 | return isX86VectorTypeForVectorCall(getContext(), Ty); | ||||
2253 | } | ||||
2254 | |||||
2255 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, | ||||
2256 | uint64_t NumMembers) const override { | ||||
2257 | // FIXME: Assumes vectorcall is in use. | ||||
2258 | return isX86VectorCallAggregateSmallEnough(NumMembers); | ||||
2259 | } | ||||
2260 | |||||
2261 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars, | ||||
2262 | bool asReturnValue) const override { | ||||
2263 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
2264 | } | ||||
2265 | |||||
2266 | bool isSwiftErrorInRegister() const override { | ||||
2267 | return true; | ||||
2268 | } | ||||
2269 | |||||
2270 | private: | ||||
2271 | ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType, | ||||
2272 | bool IsVectorCall, bool IsRegCall) const; | ||||
2273 | ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, | ||||
2274 | const ABIArgInfo ¤t) const; | ||||
2275 | void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs, | ||||
2276 | bool IsVectorCall, bool IsRegCall) const; | ||||
2277 | |||||
2278 | X86AVXABILevel AVXLevel; | ||||
2279 | |||||
2280 | bool IsMingw64; | ||||
2281 | }; | ||||
2282 | |||||
2283 | class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
2284 | public: | ||||
2285 | X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) | ||||
2286 | : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {} | ||||
2287 | |||||
2288 | const X86_64ABIInfo &getABIInfo() const { | ||||
2289 | return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); | ||||
2290 | } | ||||
2291 | |||||
2292 | /// Disable tail calls on x86-64. The epilogue code before the tail jump blocks | ||||
2293 | /// the autoreleaseRV/retainRV optimization. | ||||
2294 | bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override { | ||||
2295 | return true; | ||||
2296 | } | ||||
2297 | |||||
2298 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { | ||||
2299 | return 7; | ||||
2300 | } | ||||
2301 | |||||
2302 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
2303 | llvm::Value *Address) const override { | ||||
2304 | llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); | ||||
2305 | |||||
2306 | // 0-15 are the 16 integer registers. | ||||
2307 | // 16 is %rip. | ||||
2308 | AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); | ||||
2309 | return false; | ||||
2310 | } | ||||
2311 | |||||
2312 | llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, | ||||
2313 | StringRef Constraint, | ||||
2314 | llvm::Type* Ty) const override { | ||||
2315 | return X86AdjustInlineAsmType(CGF, Constraint, Ty); | ||||
2316 | } | ||||
2317 | |||||
2318 | bool isNoProtoCallVariadic(const CallArgList &args, | ||||
2319 | const FunctionNoProtoType *fnType) const override { | ||||
2320 |     // The default CC on x86-64 sets %al to the number of SSE
2321 | // registers used, and GCC sets this when calling an unprototyped | ||||
2322 | // function, so we override the default behavior. However, don't do | ||||
2323 | // that when AVX types are involved: the ABI explicitly states it is | ||||
2324 | // undefined, and it doesn't work in practice because of how the ABI | ||||
2325 | // defines varargs anyway. | ||||
2326 | if (fnType->getCallConv() == CC_C) { | ||||
2327 | bool HasAVXType = false; | ||||
2328 | for (CallArgList::const_iterator | ||||
2329 | it = args.begin(), ie = args.end(); it != ie; ++it) { | ||||
2330 | if (getABIInfo().isPassedUsingAVXType(it->Ty)) { | ||||
2331 | HasAVXType = true; | ||||
2332 | break; | ||||
2333 | } | ||||
2334 | } | ||||
2335 | |||||
2336 | if (!HasAVXType) | ||||
2337 | return true; | ||||
2338 | } | ||||
2339 | |||||
2340 | return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); | ||||
2341 | } | ||||
2342 | |||||
2343 | llvm::Constant * | ||||
2344 | getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { | ||||
2345 | unsigned Sig = (0xeb << 0) | // jmp rel8 | ||||
2346 | (0x06 << 8) | // .+0x08 | ||||
2347 | ('v' << 16) | | ||||
2348 | ('2' << 24); | ||||
2349 | return llvm::ConstantInt::get(CGM.Int32Ty, Sig); | ||||
2350 | } | ||||
2351 | |||||
2352 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
2353 | CodeGen::CodeGenModule &CGM) const override { | ||||
2354 | if (GV->isDeclaration()) | ||||
2355 | return; | ||||
2356 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { | ||||
2357 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { | ||||
2358 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
2359 | Fn->addFnAttr("stackrealign"); | ||||
2360 | } | ||||
2361 | if (FD->hasAttr<AnyX86InterruptAttr>()) { | ||||
2362 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
2363 | Fn->setCallingConv(llvm::CallingConv::X86_INTR); | ||||
2364 | } | ||||
2365 | } | ||||
2366 | } | ||||
2367 | }; | ||||
2368 | |||||
2369 | static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { | ||||
2370 | // If the argument does not end in .lib, automatically add the suffix. | ||||
2371 | // If the argument contains a space, enclose it in quotes. | ||||
2372 | // This matches the behavior of MSVC. | ||||
2373 | bool Quote = (Lib.find(" ") != StringRef::npos); | ||||
2374 | std::string ArgStr = Quote ? "\"" : ""; | ||||
2375 | ArgStr += Lib; | ||||
2376 | if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a")) | ||||
2377 | ArgStr += ".lib"; | ||||
2378 | ArgStr += Quote ? "\"" : ""; | ||||
2379 | return ArgStr; | ||||
2380 | } | ||||
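// For example: "kernel32" becomes "kernel32.lib", "my lib" becomes
// "\"my lib.lib\"" (quoted because of the space), and "libfoo.a" keeps its
// existing suffix.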
2381 | |||||
2382 | class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { | ||||
2383 | public: | ||||
2384 | WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, | ||||
2385 | bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, | ||||
2386 | unsigned NumRegisterParameters) | ||||
2387 | : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, | ||||
2388 | Win32StructABI, NumRegisterParameters, false) {} | ||||
2389 | |||||
2390 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
2391 | CodeGen::CodeGenModule &CGM) const override; | ||||
2392 | |||||
2393 | void getDependentLibraryOption(llvm::StringRef Lib, | ||||
2394 | llvm::SmallString<24> &Opt) const override { | ||||
2395 | Opt = "/DEFAULTLIB:"; | ||||
2396 | Opt += qualifyWindowsLibrary(Lib); | ||||
2397 | } | ||||
2398 | |||||
2399 | void getDetectMismatchOption(llvm::StringRef Name, | ||||
2400 | llvm::StringRef Value, | ||||
2401 | llvm::SmallString<32> &Opt) const override { | ||||
2402 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; | ||||
2403 | } | ||||
2404 | }; | ||||
2405 | |||||
2406 | static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
2407 | CodeGen::CodeGenModule &CGM) { | ||||
2408 | if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { | ||||
2409 | |||||
2410 | if (CGM.getCodeGenOpts().StackProbeSize != 4096) | ||||
2411 | Fn->addFnAttr("stack-probe-size", | ||||
2412 | llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); | ||||
2413 | if (CGM.getCodeGenOpts().NoStackArgProbe) | ||||
2414 | Fn->addFnAttr("no-stack-arg-probe"); | ||||
2415 | } | ||||
2416 | } | ||||
2417 | |||||
2418 | void WinX86_32TargetCodeGenInfo::setTargetAttributes( | ||||
2419 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { | ||||
2420 | X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); | ||||
2421 | if (GV->isDeclaration()) | ||||
2422 | return; | ||||
2423 | addStackProbeTargetAttributes(D, GV, CGM); | ||||
2424 | } | ||||
2425 | |||||
2426 | class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
2427 | public: | ||||
2428 | WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, | ||||
2429 | X86AVXABILevel AVXLevel) | ||||
2430 | : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {} | ||||
2431 | |||||
2432 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
2433 | CodeGen::CodeGenModule &CGM) const override; | ||||
2434 | |||||
2435 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { | ||||
2436 | return 7; | ||||
2437 | } | ||||
2438 | |||||
2439 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
2440 | llvm::Value *Address) const override { | ||||
2441 | llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); | ||||
2442 | |||||
2443 | // 0-15 are the 16 integer registers. | ||||
2444 | // 16 is %rip. | ||||
2445 | AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); | ||||
2446 | return false; | ||||
2447 | } | ||||
2448 | |||||
2449 | void getDependentLibraryOption(llvm::StringRef Lib, | ||||
2450 | llvm::SmallString<24> &Opt) const override { | ||||
2451 | Opt = "/DEFAULTLIB:"; | ||||
2452 | Opt += qualifyWindowsLibrary(Lib); | ||||
2453 | } | ||||
2454 | |||||
2455 | void getDetectMismatchOption(llvm::StringRef Name, | ||||
2456 | llvm::StringRef Value, | ||||
2457 | llvm::SmallString<32> &Opt) const override { | ||||
2458 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; | ||||
2459 | } | ||||
2460 | }; | ||||
2461 | |||||
2462 | void WinX86_64TargetCodeGenInfo::setTargetAttributes( | ||||
2463 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { | ||||
2464 | TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); | ||||
2465 | if (GV->isDeclaration()) | ||||
2466 | return; | ||||
2467 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { | ||||
2468 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { | ||||
2469 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
2470 | Fn->addFnAttr("stackrealign"); | ||||
2471 | } | ||||
2472 | if (FD->hasAttr<AnyX86InterruptAttr>()) { | ||||
2473 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
2474 | Fn->setCallingConv(llvm::CallingConv::X86_INTR); | ||||
2475 | } | ||||
2476 | } | ||||
2477 | |||||
2478 | addStackProbeTargetAttributes(D, GV, CGM); | ||||
2479 | } | ||||
2480 | } | ||||
2481 | |||||
2482 | void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, | ||||
2483 | Class &Hi) const { | ||||
2484 | // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: | ||||
2485 | // | ||||
2486 | // (a) If one of the classes is Memory, the whole argument is passed in | ||||
2487 | // memory. | ||||
2488 | // | ||||
2489 | // (b) If X87UP is not preceded by X87, the whole argument is passed in | ||||
2490 | // memory. | ||||
2491 | // | ||||
2492 | // (c) If the size of the aggregate exceeds two eightbytes and the first | ||||
2493 | // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole | ||||
2494 | // argument is passed in memory. NOTE: This is necessary to keep the | ||||
2495 | // ABI working for processors that don't support the __m256 type. | ||||
2496 | // | ||||
2497 | // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. | ||||
2498 | // | ||||
2499 | // Some of these are enforced by the merging logic. Others can arise | ||||
2500 | // only with unions; for example: | ||||
2501 | // union { _Complex double; unsigned; } | ||||
2502 | // | ||||
2503 | // Note that clauses (b) and (c) were added in 0.98. | ||||
2504 | // | ||||
2505 | if (Hi == Memory) | ||||
2506 | Lo = Memory; | ||||
2507 | if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) | ||||
2508 | Lo = Memory; | ||||
2509 | if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) | ||||
2510 | Lo = Memory; | ||||
2511 | if (Hi == SSEUp && Lo != SSE) | ||||
2512 | Hi = SSE; | ||||
2513 | } | ||||
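// For example, struct { double a, b, c, d; } is 256 bits and classifies all
// four eightbytes as SSE, so rule (c) demotes it to MEMORY (Hi is not SSEUp).
// A single __m256, by contrast, classifies as (SSE, SSEUp), survives
// post-merge, and stays in one YMM register.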
2514 | |||||
2515 | X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { | ||||
2516 | // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is | ||||
2517 | // classified recursively so that always two fields are | ||||
2518 | // considered. The resulting class is calculated according to | ||||
2519 | // the classes of the fields in the eightbyte: | ||||
2520 | // | ||||
2521 | // (a) If both classes are equal, this is the resulting class. | ||||
2522 | // | ||||
2523 | // (b) If one of the classes is NO_CLASS, the resulting class is | ||||
2524 | // the other class. | ||||
2525 | // | ||||
2526 | // (c) If one of the classes is MEMORY, the result is the MEMORY | ||||
2527 | // class. | ||||
2528 | // | ||||
2529 | // (d) If one of the classes is INTEGER, the result is the | ||||
2530 | // INTEGER. | ||||
2531 | // | ||||
2532 | // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, | ||||
2533 | // MEMORY is used as class. | ||||
2534 | // | ||||
2535 | // (f) Otherwise class SSE is used. | ||||
2536 | |||||
2537 | // Accum should never be memory (we should have returned) or | ||||
2538 | // ComplexX87 (because this cannot be passed in a structure). | ||||
2539 |   assert((Accum != Memory && Accum != ComplexX87) &&
2540 |          "Invalid accumulated classification during merge.");
2541 | if (Accum == Field || Field == NoClass) | ||||
2542 | return Accum; | ||||
2543 | if (Field == Memory) | ||||
2544 | return Memory; | ||||
2545 | if (Accum == NoClass) | ||||
2546 | return Field; | ||||
2547 | if (Accum == Integer || Field == Integer) | ||||
2548 | return Integer; | ||||
2549 | if (Field == X87 || Field == X87Up || Field == ComplexX87 || | ||||
2550 | Accum == X87 || Accum == X87Up) | ||||
2551 | return Memory; | ||||
2552 | return SSE; | ||||
2553 | } | ||||
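// For example, when int and float share an eightbyte, as in
// struct { int i; float f; }, rule (d) merges Integer with SSE into INTEGER,
// so the struct is passed in a single GPR.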
2554 | |||||
2555 | void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, | ||||
2556 | Class &Lo, Class &Hi, bool isNamedArg) const { | ||||
2557 | // FIXME: This code can be simplified by introducing a simple value class for | ||||
2558 | // Class pairs with appropriate constructor methods for the various | ||||
2559 | // situations. | ||||
2560 | |||||
2561 | // FIXME: Some of the split computations are wrong; unaligned vectors | ||||
2562 | // shouldn't be passed in registers for example, so there is no chance they | ||||
2563 | // can straddle an eightbyte. Verify & simplify. | ||||
2564 | |||||
2565 | Lo = Hi = NoClass; | ||||
2566 | |||||
2567 | Class &Current = OffsetBase < 64 ? Lo : Hi; | ||||
2568 | Current = Memory; | ||||
2569 | |||||
2570 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { | ||||
2571 | BuiltinType::Kind k = BT->getKind(); | ||||
2572 | |||||
2573 | if (k == BuiltinType::Void) { | ||||
2574 | Current = NoClass; | ||||
2575 | } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { | ||||
2576 | Lo = Integer; | ||||
2577 | Hi = Integer; | ||||
2578 | } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { | ||||
2579 | Current = Integer; | ||||
2580 | } else if (k == BuiltinType::Float || k == BuiltinType::Double) { | ||||
2581 | Current = SSE; | ||||
2582 | } else if (k == BuiltinType::LongDouble) { | ||||
2583 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); | ||||
2584 | if (LDF == &llvm::APFloat::IEEEquad()) { | ||||
2585 | Lo = SSE; | ||||
2586 | Hi = SSEUp; | ||||
2587 | } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { | ||||
2588 | Lo = X87; | ||||
2589 | Hi = X87Up; | ||||
2590 | } else if (LDF == &llvm::APFloat::IEEEdouble()) { | ||||
2591 | Current = SSE; | ||||
2592 | } else | ||||
2593 |       llvm_unreachable("unexpected long double representation!");
2594 | } | ||||
2595 | // FIXME: _Decimal32 and _Decimal64 are SSE. | ||||
2596 | // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). | ||||
2597 | return; | ||||
2598 | } | ||||
2599 | |||||
2600 | if (const EnumType *ET = Ty->getAs<EnumType>()) { | ||||
2601 | // Classify the underlying integer type. | ||||
2602 | classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); | ||||
2603 | return; | ||||
2604 | } | ||||
2605 | |||||
2606 | if (Ty->hasPointerRepresentation()) { | ||||
2607 | Current = Integer; | ||||
2608 | return; | ||||
2609 | } | ||||
2610 | |||||
2611 | if (Ty->isMemberPointerType()) { | ||||
2612 | if (Ty->isMemberFunctionPointerType()) { | ||||
2613 | if (Has64BitPointers) { | ||||
2614 | // If Has64BitPointers, this is an {i64, i64}, so classify both | ||||
2615 | // Lo and Hi now. | ||||
2616 | Lo = Hi = Integer; | ||||
2617 | } else { | ||||
2618 | // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that | ||||
2619 | // straddles an eightbyte boundary, Hi should be classified as well. | ||||
2620 | uint64_t EB_FuncPtr = (OffsetBase) / 64; | ||||
2621 | uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; | ||||
2622 | if (EB_FuncPtr != EB_ThisAdj) { | ||||
2623 | Lo = Hi = Integer; | ||||
2624 | } else { | ||||
2625 | Current = Integer; | ||||
2626 | } | ||||
2627 | } | ||||
2628 | } else { | ||||
2629 | Current = Integer; | ||||
2630 | } | ||||
2631 | return; | ||||
2632 | } | ||||
2633 | |||||
2634 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
2635 | uint64_t Size = getContext().getTypeSize(VT); | ||||
2636 | if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { | ||||
2637 | // gcc passes the following as integer: | ||||
2638 | // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> | ||||
2639 | // 2 bytes - <2 x char>, <1 x short> | ||||
2640 | // 1 byte - <1 x char> | ||||
2641 | Current = Integer; | ||||
2642 | |||||
2643 | // If this type crosses an eightbyte boundary, it should be | ||||
2644 | // split. | ||||
2645 | uint64_t EB_Lo = (OffsetBase) / 64; | ||||
2646 | uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; | ||||
2647 | if (EB_Lo != EB_Hi) | ||||
2648 | Hi = Lo; | ||||
2649 | } else if (Size == 64) { | ||||
2650 | QualType ElementType = VT->getElementType(); | ||||
2651 | |||||
2652 | // gcc passes <1 x double> in memory. :( | ||||
2653 | if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) | ||||
2654 | return; | ||||
2655 | |||||
2656 | // gcc passes <1 x long long> as SSE but clang used to unconditionally | ||||
2657 | // pass them as integer. For platforms where clang is the de facto | ||||
2658 | // platform compiler, we must continue to use integer. | ||||
2659 | if (!classifyIntegerMMXAsSSE() && | ||||
2660 | (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || | ||||
2661 | ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || | ||||
2662 | ElementType->isSpecificBuiltinType(BuiltinType::Long) || | ||||
2663 | ElementType->isSpecificBuiltinType(BuiltinType::ULong))) | ||||
2664 | Current = Integer; | ||||
2665 | else | ||||
2666 | Current = SSE; | ||||
2667 | |||||
2668 | // If this type crosses an eightbyte boundary, it should be | ||||
2669 | // split. | ||||
2670 | if (OffsetBase && OffsetBase != 64) | ||||
2671 | Hi = Lo; | ||||
2672 | } else if (Size == 128 || | ||||
2673 | (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { | ||||
2674 | QualType ElementType = VT->getElementType(); | ||||
2675 | |||||
2676 | // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :( | ||||
2677 | if (passInt128VectorsInMem() && Size != 128 && | ||||
2678 | (ElementType->isSpecificBuiltinType(BuiltinType::Int128) || | ||||
2679 | ElementType->isSpecificBuiltinType(BuiltinType::UInt128))) | ||||
2680 | return; | ||||
2681 | |||||
2682 | // Arguments of 256-bits are split into four eightbyte chunks. The | ||||
2683 | // least significant one belongs to class SSE and all the others to class | ||||
2684 | // SSEUP. The original Lo and Hi design considers that types can't be | ||||
2685 | // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. | ||||
2686 |     // This design isn't correct for 256-bits, but since there are no cases
2687 | // where the upper parts would need to be inspected, avoid adding | ||||
2688 | // complexity and just consider Hi to match the 64-256 part. | ||||
2689 | // | ||||
2690 | // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in | ||||
2691 | // registers if they are "named", i.e. not part of the "..." of a | ||||
2692 | // variadic function. | ||||
2693 | // | ||||
2694 | // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are | ||||
2695 | // split into eight eightbyte chunks, one SSE and seven SSEUP. | ||||
2696 | Lo = SSE; | ||||
2697 | Hi = SSEUp; | ||||
2698 | } | ||||
2699 | return; | ||||
2700 | } | ||||
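  // For example, <4 x float> (128 bits) classifies as (SSE, SSEUp) and is
  // passed in one XMM register, while <2 x float> (64 bits) gets
  // Current = SSE and occupies a single eightbyte.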
2701 | |||||
2702 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { | ||||
2703 | QualType ET = getContext().getCanonicalType(CT->getElementType()); | ||||
2704 | |||||
2705 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
2706 | if (ET->isIntegralOrEnumerationType()) { | ||||
2707 | if (Size <= 64) | ||||
2708 | Current = Integer; | ||||
2709 | else if (Size <= 128) | ||||
2710 | Lo = Hi = Integer; | ||||
2711 | } else if (ET == getContext().FloatTy) { | ||||
2712 | Current = SSE; | ||||
2713 | } else if (ET == getContext().DoubleTy) { | ||||
2714 | Lo = Hi = SSE; | ||||
2715 | } else if (ET == getContext().LongDoubleTy) { | ||||
2716 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); | ||||
2717 | if (LDF == &llvm::APFloat::IEEEquad()) | ||||
2718 | Current = Memory; | ||||
2719 | else if (LDF == &llvm::APFloat::x87DoubleExtended()) | ||||
2720 | Current = ComplexX87; | ||||
2721 | else if (LDF == &llvm::APFloat::IEEEdouble()) | ||||
2722 | Lo = Hi = SSE; | ||||
2723 | else | ||||
2724 |         llvm_unreachable("unexpected long double representation!");
2725 | } | ||||
2726 | |||||
2727 | // If this complex type crosses an eightbyte boundary then it | ||||
2728 | // should be split. | ||||
2729 | uint64_t EB_Real = (OffsetBase) / 64; | ||||
2730 | uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; | ||||
2731 | if (Hi == NoClass && EB_Real != EB_Imag) | ||||
2732 | Hi = Lo; | ||||
2733 | |||||
2734 | return; | ||||
2735 | } | ||||
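  // For example, _Complex float (64 bits) fits one eightbyte and classifies
  // as SSE, while _Complex double spans two eightbytes and classifies as
  // (SSE, SSE), consuming two XMM registers.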
2736 | |||||
2737 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { | ||||
2738 | // Arrays are treated like structures. | ||||
2739 | |||||
2740 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
2741 | |||||
2742 | // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger | ||||
2743 | // than eight eightbytes, ..., it has class MEMORY. | ||||
2744 | if (Size > 512) | ||||
2745 | return; | ||||
2746 | |||||
2747 | // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned | ||||
2748 | // fields, it has class MEMORY. | ||||
2749 | // | ||||
2750 | // Only need to check alignment of array base. | ||||
2751 | if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) | ||||
2752 | return; | ||||
2753 | |||||
2754 | // Otherwise implement simplified merge. We could be smarter about | ||||
2755 | // this, but it isn't worth it and would be harder to verify. | ||||
2756 | Current = NoClass; | ||||
2757 | uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); | ||||
2758 | uint64_t ArraySize = AT->getSize().getZExtValue(); | ||||
2759 | |||||
2760 | // The only case a 256-bit wide vector could be used is when the array | ||||
2761 | // contains a single 256-bit element. Since Lo and Hi logic isn't extended | ||||
2762 | // to work for sizes wider than 128, early check and fallback to memory. | ||||
2763 | // | ||||
2764 | if (Size > 128 && | ||||
2765 | (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) | ||||
2766 | return; | ||||
2767 | |||||
2768 | for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { | ||||
2769 | Class FieldLo, FieldHi; | ||||
2770 | classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); | ||||
2771 | Lo = merge(Lo, FieldLo); | ||||
2772 | Hi = merge(Hi, FieldHi); | ||||
2773 | if (Lo == Memory || Hi == Memory) | ||||
2774 | break; | ||||
2775 | } | ||||
2776 | |||||
2777 | postMerge(Size, Lo, Hi); | ||||
2778 |     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2779 | return; | ||||
2780 | } | ||||
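  // For example, a double[2] field classifies as (SSE, SSE): each element is
  // classified at its own offset and the results merge per eightbyte.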
2781 | |||||
2782 | if (const RecordType *RT = Ty->getAs<RecordType>()) { | ||||
2783 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
2784 | |||||
2785 | // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger | ||||
2786 | // than eight eightbytes, ..., it has class MEMORY. | ||||
2787 | if (Size > 512) | ||||
2788 | return; | ||||
2789 | |||||
2790 | // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial | ||||
2791 | // copy constructor or a non-trivial destructor, it is passed by invisible | ||||
2792 | // reference. | ||||
2793 | if (getRecordArgABI(RT, getCXXABI())) | ||||
2794 | return; | ||||
2795 | |||||
2796 | const RecordDecl *RD = RT->getDecl(); | ||||
2797 | |||||
2798 | // Assume variable sized types are passed in memory. | ||||
2799 | if (RD->hasFlexibleArrayMember()) | ||||
2800 | return; | ||||
2801 | |||||
2802 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); | ||||
2803 | |||||
2804 | // Reset Lo class, this will be recomputed. | ||||
2805 | Current = NoClass; | ||||
2806 | |||||
2807 | // If this is a C++ record, classify the bases first. | ||||
2808 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { | ||||
2809 | for (const auto &I : CXXRD->bases()) { | ||||
2810 |         assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2811 |                "Unexpected base class!");
2812 | const CXXRecordDecl *Base = | ||||
2813 | cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); | ||||
2814 | |||||
2815 | // Classify this field. | ||||
2816 | // | ||||
2817 | // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a | ||||
2818 | // single eightbyte, each is classified separately. Each eightbyte gets | ||||
2819 | // initialized to class NO_CLASS. | ||||
2820 | Class FieldLo, FieldHi; | ||||
2821 | uint64_t Offset = | ||||
2822 | OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); | ||||
2823 | classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); | ||||
2824 | Lo = merge(Lo, FieldLo); | ||||
2825 | Hi = merge(Hi, FieldHi); | ||||
2826 | if (Lo == Memory || Hi == Memory) { | ||||
2827 | postMerge(Size, Lo, Hi); | ||||
2828 | return; | ||||
2829 | } | ||||
2830 | } | ||||
2831 | } | ||||
2832 | |||||
2833 | // Classify the fields one at a time, merging the results. | ||||
2834 | unsigned idx = 0; | ||||
2835 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); | ||||
2836 | i != e; ++i, ++idx) { | ||||
2837 | uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); | ||||
2838 | bool BitField = i->isBitField(); | ||||
2839 | |||||
2840 | // Ignore padding bit-fields. | ||||
2841 | if (BitField && i->isUnnamedBitfield()) | ||||
2842 | continue; | ||||
2843 | |||||
2844 | // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than | ||||
2845 | // four eightbytes, or it contains unaligned fields, it has class MEMORY. | ||||
2846 | // | ||||
2847 | // The only case a 256-bit wide vector could be used is when the struct | ||||
2848 | // contains a single 256-bit element. Since Lo and Hi logic isn't extended | ||||
2849 | // to work for sizes wider than 128, early check and fallback to memory. | ||||
2850 | // | ||||
2851 | if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) || | ||||
2852 | Size > getNativeVectorSizeForAVXABI(AVXLevel))) { | ||||
2853 | Lo = Memory; | ||||
2854 | postMerge(Size, Lo, Hi); | ||||
2855 | return; | ||||
2856 | } | ||||
2857 | // Note, skip this test for bit-fields, see below. | ||||
2858 | if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { | ||||
2859 | Lo = Memory; | ||||
2860 | postMerge(Size, Lo, Hi); | ||||
2861 | return; | ||||
2862 | } | ||||
2863 | |||||
2864 | // Classify this field. | ||||
2865 | // | ||||
2866 | // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate | ||||
2867 | // exceeds a single eightbyte, each is classified | ||||
2868 | // separately. Each eightbyte gets initialized to class | ||||
2869 | // NO_CLASS. | ||||
2870 | Class FieldLo, FieldHi; | ||||
2871 | |||||
2872 | // Bit-fields require special handling, they do not force the | ||||
2873 | // structure to be passed in memory even if unaligned, and | ||||
2874 | // therefore they can straddle an eightbyte. | ||||
2875 | if (BitField) { | ||||
2876 |         assert(!i->isUnnamedBitfield());
2877 | uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); | ||||
2878 | uint64_t Size = i->getBitWidthValue(getContext()); | ||||
2879 | |||||
2880 | uint64_t EB_Lo = Offset / 64; | ||||
2881 | uint64_t EB_Hi = (Offset + Size - 1) / 64; | ||||
2882 | |||||
2883 | if (EB_Lo) { | ||||
2884 |           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2885 | FieldLo = NoClass; | ||||
2886 | FieldHi = Integer; | ||||
2887 | } else { | ||||
2888 | FieldLo = Integer; | ||||
2889 | FieldHi = EB_Hi ? Integer : NoClass; | ||||
2890 | } | ||||
2891 | } else | ||||
2892 | classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); | ||||
2893 | Lo = merge(Lo, FieldLo); | ||||
2894 | Hi = merge(Hi, FieldHi); | ||||
2895 | if (Lo == Memory || Hi == Memory) | ||||
2896 | break; | ||||
2897 | } | ||||
2898 | |||||
2899 | postMerge(Size, Lo, Hi); | ||||
2900 | } | ||||
2901 | } | ||||
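// Putting the pieces together: struct { double d; int i; } classifies as
// (SSE, Integer), so it is passed in one XMM register and one GPR.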
2902 | |||||
2903 | ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { | ||||
2904 | // If this is a scalar LLVM value then assume LLVM will pass it in the right | ||||
2905 | // place naturally. | ||||
2906 | if (!isAggregateTypeForABI(Ty)) { | ||||
2907 | // Treat an enum type as its underlying type. | ||||
2908 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
2909 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
2910 | |||||
2911 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
2912 | : ABIArgInfo::getDirect()); | ||||
2913 | } | ||||
2914 | |||||
2915 | return getNaturalAlignIndirect(Ty); | ||||
2916 | } | ||||
2917 | |||||
2918 | bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { | ||||
2919 | if (const VectorType *VecTy = Ty->getAs<VectorType>()) { | ||||
2920 | uint64_t Size = getContext().getTypeSize(VecTy); | ||||
2921 | unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); | ||||
2922 | if (Size <= 64 || Size > LargestVector) | ||||
2923 | return true; | ||||
2924 | QualType EltTy = VecTy->getElementType(); | ||||
2925 | if (passInt128VectorsInMem() && | ||||
2926 | (EltTy->isSpecificBuiltinType(BuiltinType::Int128) || | ||||
2927 | EltTy->isSpecificBuiltinType(BuiltinType::UInt128))) | ||||
2928 | return true; | ||||
2929 | } | ||||
2930 | |||||
2931 | return false; | ||||
2932 | } | ||||
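// For example, <2 x int> (64 bits) is illegal and will be coerced, and a
// 512-bit vector is illegal unless AVXLevel allows 512-bit native vectors.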
2933 | |||||
2934 | ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, | ||||
2935 | unsigned freeIntRegs) const { | ||||
2936 | // If this is a scalar LLVM value then assume LLVM will pass it in the right | ||||
2937 | // place naturally. | ||||
2938 | // | ||||
2939 | // This assumption is optimistic, as there could be free registers available | ||||
2940 | // when we need to pass this argument in memory, and LLVM could try to pass | ||||
2941 | // the argument in the free register. This does not seem to happen currently, | ||||
2942 | // but this code would be much safer if we could mark the argument with | ||||
2943 | // 'onstack'. See PR12193. | ||||
2944 | if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { | ||||
2945 | // Treat an enum type as its underlying type. | ||||
2946 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
2947 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
2948 | |||||
2949 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
2950 | : ABIArgInfo::getDirect()); | ||||
2951 | } | ||||
2952 | |||||
2953 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
2954 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
2955 | |||||
2956 | // Compute the byval alignment. We specify the alignment of the byval in all | ||||
2957 | // cases so that the mid-level optimizer knows the alignment of the byval. | ||||
2958 | unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); | ||||
2959 | |||||
2960 | // Attempt to avoid passing indirect results using byval when possible. This | ||||
2961 | // is important for good codegen. | ||||
2962 | // | ||||
2963 | // We do this by coercing the value into a scalar type which the backend can | ||||
2964 | // handle naturally (i.e., without using byval). | ||||
2965 | // | ||||
2966 | // For simplicity, we currently only do this when we have exhausted all of the | ||||
2967 | // free integer registers. Doing this when there are free integer registers | ||||
2968 | // would require more care, as we would have to ensure that the coerced value | ||||
2969 |   // did not claim the unused register. That would require either reordering the
2970 | // arguments to the function (so that any subsequent inreg values came first), | ||||
2971 | // or only doing this optimization when there were no following arguments that | ||||
2972 | // might be inreg. | ||||
2973 | // | ||||
2974 | // We currently expect it to be rare (particularly in well written code) for | ||||
2975 | // arguments to be passed on the stack when there are still free integer | ||||
2976 | // registers available (this would typically imply large structs being passed | ||||
2977 | // by value), so this seems like a fair tradeoff for now. | ||||
2978 | // | ||||
2979 | // We can revisit this if the backend grows support for 'onstack' parameter | ||||
2980 | // attributes. See PR12193. | ||||
2981 | if (freeIntRegs == 0) { | ||||
2982 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
2983 | |||||
2984 | // If this type fits in an eightbyte, coerce it into the matching integral | ||||
2985 | // type, which will end up on the stack (with alignment 8). | ||||
2986 | if (Align == 8 && Size <= 64) | ||||
2987 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), | ||||
2988 | Size)); | ||||
2989 | } | ||||
2990 | |||||
2991 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); | ||||
2992 | } | ||||
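// For example, once all six integer argument registers are exhausted, a
// struct { void *p; } argument (8 bytes, 8-byte aligned) is coerced to a
// plain i64 and lands on the stack without a byval attribute.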
2993 | |||||
2994 | /// The ABI specifies that a value should be passed in a full vector XMM/YMM | ||||
2995 | /// register. Pick an LLVM IR type that will be passed as a vector register. | ||||
2996 | llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { | ||||
2997 | // Wrapper structs/arrays that only contain vectors are passed just like | ||||
2998 | // vectors; strip them off if present. | ||||
2999 | if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) | ||||
3000 | Ty = QualType(InnerTy, 0); | ||||
3001 | |||||
3002 | llvm::Type *IRType = CGT.ConvertType(Ty); | ||||
3003 | if (isa<llvm::VectorType>(IRType)) { | ||||
3004 | // Don't pass vXi128 vectors in their native type, the backend can't | ||||
3005 | // legalize them. | ||||
3006 | if (passInt128VectorsInMem() && | ||||
3007 | IRType->getVectorElementType()->isIntegerTy(128)) { | ||||
3008 | // Use a vXi64 vector. | ||||
3009 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
3010 | return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), | ||||
3011 | Size / 64); | ||||
3012 | } | ||||
3013 | |||||
3014 | return IRType; | ||||
3015 | } | ||||
3016 | |||||
3017 | if (IRType->getTypeID() == llvm::Type::FP128TyID) | ||||
3018 | return IRType; | ||||
3019 | |||||
3020 | // We couldn't find the preferred IR vector type for 'Ty'. | ||||
3021 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
3022 |   assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3023 | |||||
3024 | |||||
3025 | // Return a LLVM IR vector type based on the size of 'Ty'. | ||||
3026 | return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), | ||||
3027 | Size / 64); | ||||
3028 | } | ||||
3029 | |||||
3030 | /// BitsContainNoUserData - Return true if the specified [start,end) bit range | ||||
3031 | /// is known to either be off the end of the specified type or being in | ||||
3032 | /// alignment padding. The user type specified is known to be at most 128 bits | ||||
3033 | /// in size, and have passed through X86_64ABIInfo::classify with a successful | ||||
3034 | /// classification that put one of the two halves in the INTEGER class. | ||||
3035 | /// | ||||
3036 | /// It is conservatively correct to return false. | ||||
3037 | static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, | ||||
3038 | unsigned EndBit, ASTContext &Context) { | ||||
3039 | // If the bytes being queried are off the end of the type, there is no user | ||||
3040 | // data hiding here. This handles analysis of builtins, vectors and other | ||||
3041 | // types that don't contain interesting padding. | ||||
3042 | unsigned TySize = (unsigned)Context.getTypeSize(Ty); | ||||
3043 | if (TySize <= StartBit) | ||||
3044 | return true; | ||||
3045 | |||||
3046 |   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3047 | unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); | ||||
3048 | unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); | ||||
3049 | |||||
3050 | // Check each element to see if the element overlaps with the queried range. | ||||
3051 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
3052 |       // If the element is after the span we care about, then we're done.
3053 | unsigned EltOffset = i*EltSize; | ||||
3054 | if (EltOffset >= EndBit) break; | ||||
3055 | |||||
3056 | unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; | ||||
3057 | if (!BitsContainNoUserData(AT->getElementType(), EltStart, | ||||
3058 | EndBit-EltOffset, Context)) | ||||
3059 | return false; | ||||
3060 | } | ||||
3061 | // If it overlaps no elements, then it is safe to process as padding. | ||||
3062 | return true; | ||||
3063 | } | ||||
3064 | |||||
3065 |   if (const RecordType *RT = Ty->getAs<RecordType>()) {
3066 | const RecordDecl *RD = RT->getDecl(); | ||||
3067 | const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); | ||||
3068 | |||||
3069 | // If this is a C++ record, check the bases first. | ||||
3070 |     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3071 | for (const auto &I : CXXRD->bases()) { | ||||
3072 |         assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3073 |                "Unexpected base class!");
3074 | const CXXRecordDecl *Base = | ||||
3075 | cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); | ||||
3076 | |||||
3077 | // If the base is after the span we care about, ignore it. | ||||
3078 | unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); | ||||
3079 | if (BaseOffset >= EndBit) continue; | ||||
3080 | |||||
3081 | unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; | ||||
3082 | if (!BitsContainNoUserData(I.getType(), BaseStart, | ||||
3083 | EndBit-BaseOffset, Context)) | ||||
3084 | return false; | ||||
3085 | } | ||||
3086 | } | ||||
3087 | |||||
3088 | // Verify that no field has data that overlaps the region of interest. Yes | ||||
3089 | // this could be sped up a lot by being smarter about queried fields, | ||||
3090 | // however we're only looking at structs up to 16 bytes, so we don't care | ||||
3091 | // much. | ||||
3092 | unsigned idx = 0; | ||||
3093 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); | ||||
3094 | i != e; ++i, ++idx) { | ||||
3095 | unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); | ||||
3096 | |||||
3097 | // If we found a field after the region we care about, then we're done. | ||||
3098 | if (FieldOffset >= EndBit) break; | ||||
3099 | |||||
3100 | unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; | ||||
3101 | if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, | ||||
3102 | Context)) | ||||
3103 | return false; | ||||
3104 | } | ||||
3105 | |||||
3106 | // If nothing in this record overlapped the area of interest, then we're | ||||
3107 | // clean. | ||||
3108 | return true; | ||||
3109 | } | ||||
3110 | |||||
3111 | return false; | ||||
3112 | } | ||||
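// For example, in struct { double d; int i; } the bits [96, 128) are tail
// padding, so the caller can safely represent the high eightbyte as i32.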
3113 | |||||
3114 | /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a | ||||
3115 | /// float member at the specified offset. For example, {int,{float}} has a | ||||
3116 | /// float at offset 4. It is conservatively correct for this routine to return | ||||
3117 | /// false. | ||||
3118 | static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, | ||||
3119 | const llvm::DataLayout &TD) { | ||||
3120 | // Base case if we find a float. | ||||
3121 | if (IROffset == 0 && IRType->isFloatTy()) | ||||
3122 | return true; | ||||
3123 | |||||
3124 | // If this is a struct, recurse into the field at the specified offset. | ||||
3125 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { | ||||
3126 | const llvm::StructLayout *SL = TD.getStructLayout(STy); | ||||
3127 | unsigned Elt = SL->getElementContainingOffset(IROffset); | ||||
3128 | IROffset -= SL->getElementOffset(Elt); | ||||
3129 | return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); | ||||
3130 | } | ||||
3131 | |||||
3132 | // If this is an array, recurse into the field at the specified offset. | ||||
3133 | if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { | ||||
3134 | llvm::Type *EltTy = ATy->getElementType(); | ||||
3135 | unsigned EltSize = TD.getTypeAllocSize(EltTy); | ||||
3136 | IROffset -= IROffset/EltSize*EltSize; | ||||
3137 | return ContainsFloatAtOffset(EltTy, IROffset, TD); | ||||
3138 | } | ||||
3139 | |||||
3140 | return false; | ||||
3141 | } | ||||
3142 | |||||
3143 | |||||
3144 | /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the | ||||
3145 | /// low 8 bytes of an XMM register, corresponding to the SSE class. | ||||
3146 | llvm::Type *X86_64ABIInfo:: | ||||
3147 | GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, | ||||
3148 | QualType SourceTy, unsigned SourceOffset) const { | ||||
3149 |   // The only three choices we have are double, <2 x float>, or float. We
3150 |   // pass as float if the last 4 bytes are just padding.  This happens for
3151 | // structs that contain 3 floats. | ||||
3152 | if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, | ||||
3153 | SourceOffset*8+64, getContext())) | ||||
3154 | return llvm::Type::getFloatTy(getVMContext()); | ||||
3155 | |||||
3156 | // We want to pass as <2 x float> if the LLVM IR type contains a float at | ||||
3157 | // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the | ||||
3158 | // case. | ||||
3159 | if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && | ||||
3160 | ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) | ||||
3161 | return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); | ||||
3162 | |||||
3163 | return llvm::Type::getDoubleTy(getVMContext()); | ||||
3164 | } | ||||
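// For example, struct { float a, b, c; } lowers to { <2 x float>, float }:
// the first eightbyte contains floats at offsets 0 and 4, and the second
// contains one float followed by four bytes of padding.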
3165 | |||||
3166 | |||||
3167 | /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in | ||||
3168 | /// an 8-byte GPR. This means that we either have a scalar or we are talking | ||||
3169 | /// about the high or low part of an up-to-16-byte struct. This routine picks | ||||
3170 | /// the best LLVM IR type to represent this, which may be i64 or may be anything | ||||
3171 | /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, | ||||
3172 | /// etc). | ||||
3173 | /// | ||||
3174 | /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for | ||||
3175 | /// the source type. IROffset is an offset in bytes into the LLVM IR type that | ||||
3176 | /// the 8-byte value references. PrefType may be null. | ||||
3177 | /// | ||||
3178 | /// SourceTy is the source-level type for the entire argument. SourceOffset is | ||||
3179 | /// an offset into this that we're processing (which is always either 0 or 8). | ||||
3180 | /// | ||||
3181 | llvm::Type *X86_64ABIInfo:: | ||||
3182 | GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, | ||||
3183 | QualType SourceTy, unsigned SourceOffset) const { | ||||
3184 | // If we're dealing with an un-offset LLVM IR type, then it means that we're | ||||
3185 | // returning an 8-byte unit starting with it. See if we can safely use it. | ||||
3186 |   if (IROffset == 0) {
3187 | // Pointers and int64's always fill the 8-byte unit. | ||||
3188 | if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || | ||||
3189 | IRType->isIntegerTy(64)) | ||||
3190 | return IRType; | ||||
3191 | |||||
3192 | // If we have a 1/2/4-byte integer, we can use it only if the rest of the | ||||
3193 | // goodness in the source type is just tail padding. This is allowed to | ||||
3194 | // kick in for struct {double,int} on the int, but not on | ||||
3195 | // struct{double,int,int} because we wouldn't return the second int. We | ||||
3196 | // have to do this analysis on the source type because we can't depend on | ||||
3197 | // unions being lowered a specific way etc. | ||||
3198 | if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || | ||||
3199 | IRType->isIntegerTy(32) || | ||||
3200 | (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { | ||||
3201 | unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : | ||||
3202 | cast<llvm::IntegerType>(IRType)->getBitWidth(); | ||||
3203 | |||||
3204 | if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, | ||||
3205 | SourceOffset*8+64, getContext())) | ||||
3206 | return IRType; | ||||
3207 | } | ||||
3208 | } | ||||
3209 | |||||
3210 |   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3211 | // If this is a struct, recurse into the field at the specified offset. | ||||
3212 | const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); | ||||
3213 | if (IROffset < SL->getSizeInBytes()) { | ||||
3214 | unsigned FieldIdx = SL->getElementContainingOffset(IROffset); | ||||
3215 | IROffset -= SL->getElementOffset(FieldIdx); | ||||
3216 | |||||
3217 | return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, | ||||
3218 | SourceTy, SourceOffset); | ||||
3219 | } | ||||
3220 | } | ||||
3221 | |||||
3222 | if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { | ||||
3223 | llvm::Type *EltTy = ATy->getElementType(); | ||||
3224 | unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); | ||||
3225 | unsigned EltOffset = IROffset/EltSize*EltSize; | ||||
3226 | return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, | ||||
3227 | SourceOffset); | ||||
3228 | } | ||||
3229 | |||||
3230 | // Okay, we don't have any better idea of what to pass, so we pass this in an | ||||
3231 | // integer register that isn't too big to fit the rest of the struct. | ||||
3232 | unsigned TySizeInBytes = | ||||
3233 | (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); | ||||
3234 | |||||
3235 |   assert(TySizeInBytes != SourceOffset && "Empty field?");
3236 | |||||
3237 | // It is always safe to classify this as an integer type up to i64 that | ||||
3238 | // isn't larger than the structure. | ||||
3239 | return llvm::IntegerType::get(getVMContext(), | ||||
3240 | std::min(TySizeInBytes-SourceOffset, 8U)*8); | ||||
3241 | } | ||||
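// For example, for struct { double d; int i; } the high eightbyte lowers to
// i32 because the trailing four bytes are tail padding, giving
// { double, i32 } for the whole argument.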
3242 | |||||
3243 | |||||
3244 | /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally | ||||
3245 | /// be used as elements of a two register pair to pass or return, return a | ||||
3246 | /// first class aggregate to represent them. For example, if the low part of | ||||
3247 | /// a by-value argument should be passed as i32* and the high part as float, | ||||
3248 | /// return {i32*, float}. | ||||
3249 | static llvm::Type * | ||||
3250 | GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, | ||||
3251 | const llvm::DataLayout &TD) { | ||||
3252 |   // In order to correctly satisfy the ABI, we need the high part to start
3253 | // at offset 8. If the high and low parts we inferred are both 4-byte types | ||||
3254 | // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have | ||||
3255 | // the second element at offset 8. Check for this: | ||||
3256 | unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); | ||||
3257 | unsigned HiAlign = TD.getABITypeAlignment(Hi); | ||||
3258 | unsigned HiStart = llvm::alignTo(LoSize, HiAlign); | ||||
3259 |   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3260 | |||||
3261 | // To handle this, we have to increase the size of the low part so that the | ||||
3262 | // second element will start at an 8 byte offset. We can't increase the size | ||||
3263 | // of the second element because it might make us access off the end of the | ||||
3264 | // struct. | ||||
3265 | if (HiStart != 8) { | ||||
3266 | // There are usually two sorts of types the ABI generation code can produce | ||||
3267 | // for the low part of a pair that aren't 8 bytes in size: float or | ||||
3268 | // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and | ||||
3269 | // NaCl). | ||||
3270 | // Promote these to a larger type. | ||||
3271 | if (Lo->isFloatTy()) | ||||
3272 | Lo = llvm::Type::getDoubleTy(Lo->getContext()); | ||||
3273 | else { | ||||
3274 |       assert((Lo->isIntegerTy() || Lo->isPointerTy()) &&
3275 |              "Invalid/unknown lo type");
3276 | Lo = llvm::Type::getInt64Ty(Lo->getContext()); | ||||
3277 | } | ||||
3278 | } | ||||
3279 | |||||
3280 | llvm::StructType *Result = llvm::StructType::get(Lo, Hi); | ||||
3281 | |||||
3282 | // Verify that the second element is at an 8-byte offset. | ||||
3283 |   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3284 |          "Invalid x86-64 argument pair!");
3285 | return Result; | ||||
3286 | } | ||||
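// For example, pairing Lo = float with Hi = i32 would put the second element
// at offset 4, so Lo is promoted to double and the result is { double, i32 }
// with the i32 correctly at offset 8.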
3287 | |||||
3288 | ABIArgInfo X86_64ABIInfo:: | ||||
3289 | classifyReturnType(QualType RetTy) const { | ||||
3290 | // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the | ||||
3291 | // classification algorithm. | ||||
3292 | X86_64ABIInfo::Class Lo, Hi; | ||||
3293 | classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); | ||||
3294 | |||||
3295 | // Check some invariants. | ||||
3296 |   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3297 |   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3298 | |||||
3299 | llvm::Type *ResType = nullptr; | ||||
3300 | switch (Lo) { | ||||
3301 | case NoClass: | ||||
3302 | if (Hi == NoClass) | ||||
3303 | return ABIArgInfo::getIgnore(); | ||||
3304 | // If the low part is just padding, it takes no register, leave ResType | ||||
3305 | // null. | ||||
3306 |     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3307 |            "Unknown missing lo part");
3308 | break; | ||||
3309 | |||||
3310 | case SSEUp: | ||||
3311 | case X87Up: | ||||
3312 |     llvm_unreachable("Invalid classification for lo word.");
3313 | |||||
3314 | // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via | ||||
3315 | // hidden argument. | ||||
3316 | case Memory: | ||||
3317 | return getIndirectReturnResult(RetTy); | ||||
3318 | |||||
3319 | // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next | ||||
3320 | // available register of the sequence %rax, %rdx is used. | ||||
3321 | case Integer: | ||||
3322 | ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); | ||||
3323 | |||||
3324 | // If we have a sign or zero extended integer, make sure to return Extend | ||||
3325 | // so that the parameter gets the right LLVM IR attributes. | ||||
3326 | if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { | ||||
3327 | // Treat an enum type as its underlying type. | ||||
3328 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
3329 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
3330 | |||||
3331 | if (RetTy->isIntegralOrEnumerationType() && | ||||
3332 | RetTy->isPromotableIntegerType()) | ||||
3333 | return ABIArgInfo::getExtend(RetTy); | ||||
3334 | } | ||||
3335 | break; | ||||
3336 | |||||
3337 | // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next | ||||
3338 | // available SSE register of the sequence %xmm0, %xmm1 is used. | ||||
3339 | case SSE: | ||||
3340 | ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); | ||||
3341 | break; | ||||
3342 | |||||
3343 | // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is | ||||
3344 | // returned on the X87 stack in %st0 as 80-bit x87 number. | ||||
3345 | case X87: | ||||
3346 | ResType = llvm::Type::getX86_FP80Ty(getVMContext()); | ||||
3347 | break; | ||||
3348 | |||||
3349 | // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real | ||||
3350 | // part of the value is returned in %st0 and the imaginary part in | ||||
3351 | // %st1. | ||||
3352 | case ComplexX87: | ||||
3353 | assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); | ||||
3354 | ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), | ||||
3355 | llvm::Type::getX86_FP80Ty(getVMContext())); | ||||
3356 | break; | ||||
3357 | } | ||||
3358 | |||||
3359 | llvm::Type *HighPart = nullptr; | ||||
3360 | switch (Hi) { | ||||
3361 | // Memory was handled previously and X87 should | ||||
3362 | // never occur as a hi class. | ||||
3363 | case Memory: | ||||
3364 | case X87: | ||||
3365 | llvm_unreachable("Invalid classification for hi word."); | ||||
3366 | |||||
3367 | case ComplexX87: // Previously handled. | ||||
3368 | case NoClass: | ||||
3369 | break; | ||||
3370 | |||||
3371 | case Integer: | ||||
3372 | HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); | ||||
3373 | if (Lo == NoClass) // Return HighPart at offset 8 in memory. | ||||
3374 | return ABIArgInfo::getDirect(HighPart, 8); | ||||
3375 | break; | ||||
3376 | case SSE: | ||||
3377 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); | ||||
3378 | if (Lo == NoClass) // Return HighPart at offset 8 in memory. | ||||
3379 | return ABIArgInfo::getDirect(HighPart, 8); | ||||
3380 | break; | ||||
3381 | |||||
3382 | // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte | ||||
3383 | // is passed in the next available eightbyte chunk of the last used | ||||
3384 | // vector register. | ||||
3385 | // | ||||
3386 | // SSEUP should always be preceded by SSE, just widen. | ||||
3387 | case SSEUp: | ||||
3388 | assert(Lo == SSE && "Unexpected SSEUp classification."); | ||||
3389 | ResType = GetByteVectorType(RetTy); | ||||
3390 | break; | ||||
3391 | |||||
3392 | // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is | ||||
3393 | // returned together with the previous X87 value in %st0. | ||||
3394 | case X87Up: | ||||
3395 | // If X87Up is preceded by X87, we don't need to do | ||||
3396 | // anything. However, in some cases with unions it may not be | ||||
3397 | // preceded by X87. In such situations we follow gcc and pass the | ||||
3398 | // extra bits in an SSE reg. | ||||
3399 | if (Lo != X87) { | ||||
3400 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); | ||||
3401 | if (Lo == NoClass) // Return HighPart at offset 8 in memory. | ||||
3402 | return ABIArgInfo::getDirect(HighPart, 8); | ||||
3403 | } | ||||
3404 | break; | ||||
3405 | } | ||||
3406 | |||||
3407 | // If a high part was specified, merge it together with the low part. It is | ||||
3408 | // known to pass in the high eightbyte of the result. We do this by forming a | ||||
3409 | // first class struct aggregate with the high and low part: {low, high} | ||||
3410 | if (HighPart) | ||||
3411 | ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); | ||||
3412 | |||||
3413 | return ABIArgInfo::getDirect(ResType); | ||||
3414 | } | ||||
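// Worked example (illustrative, using a hypothetical type): for a return type
//   struct S { long a; double b; };
// the classifier yields Lo = Integer and Hi = SSE, so ResType is merged into
// the pair {i64, double}; the first eightbyte comes back in %rax and the
// second in %xmm0, per AMD64-ABI 3.2.3p4.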
3415 | |||||
3416 | ABIArgInfo X86_64ABIInfo::classifyArgumentType( | ||||
3417 | QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, | ||||
3418 | bool isNamedArg) | ||||
3419 | const | ||||
3420 | { | ||||
3421 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
3422 | |||||
3423 | X86_64ABIInfo::Class Lo, Hi; | ||||
3424 | classify(Ty, 0, Lo, Hi, isNamedArg); | ||||
3425 | |||||
3426 | // Check some invariants. | ||||
3427 | // FIXME: Enforce these by construction. | ||||
3428 | assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); | ||||
3429 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); | ||||
3430 | |||||
3431 | neededInt = 0; | ||||
3432 | neededSSE = 0; | ||||
3433 | llvm::Type *ResType = nullptr; | ||||
3434 | switch (Lo) { | ||||
3435 | case NoClass: | ||||
3436 | if (Hi == NoClass) | ||||
3437 | return ABIArgInfo::getIgnore(); | ||||
3438 | // If the low part is just padding, it takes no register, leave ResType | ||||
3439 | // null. | ||||
3440 | assert((Hi == SSE || Hi == Integer || Hi == X87Up) && | ||||
3441 | "Unknown missing lo part"); | ||||
3442 | break; | ||||
3443 | |||||
3444 | // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument | ||||
3445 | // on the stack. | ||||
3446 | case Memory: | ||||
3447 | |||||
3448 | // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or | ||||
3449 | // COMPLEX_X87, it is passed in memory. | ||||
3450 | case X87: | ||||
3451 | case ComplexX87: | ||||
3452 | if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) | ||||
3453 | ++neededInt; | ||||
3454 | return getIndirectResult(Ty, freeIntRegs); | ||||
3455 | |||||
3456 | case SSEUp: | ||||
3457 | case X87Up: | ||||
3458 | llvm_unreachable("Invalid classification for lo word."); | ||||
3459 | |||||
3460 | // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next | ||||
3461 | // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 | ||||
3462 | // and %r9 is used. | ||||
3463 | case Integer: | ||||
3464 | ++neededInt; | ||||
3465 | |||||
3466 | // Pick an 8-byte type based on the preferred type. | ||||
3467 | ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); | ||||
3468 | |||||
3469 | // If we have a sign or zero extended integer, make sure to return Extend | ||||
3470 | // so that the parameter gets the right LLVM IR attributes. | ||||
3471 | if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { | ||||
3472 | // Treat an enum type as its underlying type. | ||||
3473 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
3474 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
3475 | |||||
3476 | if (Ty->isIntegralOrEnumerationType() && | ||||
3477 | Ty->isPromotableIntegerType()) | ||||
3478 | return ABIArgInfo::getExtend(Ty); | ||||
3479 | } | ||||
3480 | |||||
3481 | break; | ||||
3482 | |||||
3483 | // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next | ||||
3484 | // available SSE register is used, the registers are taken in the | ||||
3485 | // order from %xmm0 to %xmm7. | ||||
3486 | case SSE: { | ||||
3487 | llvm::Type *IRType = CGT.ConvertType(Ty); | ||||
3488 | ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); | ||||
3489 | ++neededSSE; | ||||
3490 | break; | ||||
3491 | } | ||||
3492 | } | ||||
3493 | |||||
3494 | llvm::Type *HighPart = nullptr; | ||||
3495 | switch (Hi) { | ||||
3496 | // Memory was handled previously, ComplexX87 and X87 should | ||||
3497 | // never occur as hi classes, and X87Up must be preceded by X87, | ||||
3498 | // which is passed in memory. | ||||
3499 | case Memory: | ||||
3500 | case X87: | ||||
3501 | case ComplexX87: | ||||
3502 | llvm_unreachable("Invalid classification for hi word."); | ||||
3503 | |||||
3504 | case NoClass: break; | ||||
3505 | |||||
3506 | case Integer: | ||||
3507 | ++neededInt; | ||||
3508 | // Pick an 8-byte type based on the preferred type. | ||||
3509 | HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); | ||||
3510 | |||||
3511 | if (Lo == NoClass) // Pass HighPart at offset 8 in memory. | ||||
3512 | return ABIArgInfo::getDirect(HighPart, 8); | ||||
3513 | break; | ||||
3514 | |||||
3515 | // X87Up generally doesn't occur here (long double is passed in | ||||
3516 | // memory), except in situations involving unions. | ||||
3517 | case X87Up: | ||||
3518 | case SSE: | ||||
3519 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); | ||||
3520 | |||||
3521 | if (Lo == NoClass) // Pass HighPart at offset 8 in memory. | ||||
3522 | return ABIArgInfo::getDirect(HighPart, 8); | ||||
3523 | |||||
3524 | ++neededSSE; | ||||
3525 | break; | ||||
3526 | |||||
3527 | // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the | ||||
3528 | // eightbyte is passed in the upper half of the last used SSE | ||||
3529 | // register. This only happens when 128-bit vectors are passed. | ||||
3530 | case SSEUp: | ||||
3531 | assert(Lo == SSE && "Unexpected SSEUp classification"); | ||||
3532 | ResType = GetByteVectorType(Ty); | ||||
3533 | break; | ||||
3534 | } | ||||
3535 | |||||
3536 | // If a high part was specified, merge it together with the low part. It is | ||||
3537 | // known to pass in the high eightbyte of the result. We do this by forming a | ||||
3538 | // first class struct aggregate with the high and low part: {low, high} | ||||
3539 | if (HighPart) | ||||
3540 | ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); | ||||
3541 | |||||
3542 | return ABIArgInfo::getDirect(ResType); | ||||
3543 | } | ||||
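// Worked example (illustrative, using a hypothetical type): passing
//   struct S { double x, y; };
// classifies both eightbytes as SSE, so the argument is coerced to
// {double, double} with neededInt == 0 and neededSSE == 2, consuming two
// XMM registers.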
3544 | |||||
3545 | ABIArgInfo | ||||
3546 | X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, | ||||
3547 | unsigned &NeededSSE) const { | ||||
3548 | auto RT = Ty->getAs<RecordType>(); | ||||
3549 | assert(RT && "classifyRegCallStructType only valid with struct types"); | ||||
3550 | |||||
3551 | if (RT->getDecl()->hasFlexibleArrayMember()) | ||||
3552 | return getIndirectReturnResult(Ty); | ||||
3553 | |||||
3554 | // Sum up bases | ||||
3555 | if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { | ||||
3556 | if (CXXRD->isDynamicClass()) { | ||||
3557 | NeededInt = NeededSSE = 0; | ||||
3558 | return getIndirectReturnResult(Ty); | ||||
3559 | } | ||||
3560 | |||||
3561 | for (const auto &I : CXXRD->bases()) | ||||
3562 | if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) | ||||
3563 | .isIndirect()) { | ||||
3564 | NeededInt = NeededSSE = 0; | ||||
3565 | return getIndirectReturnResult(Ty); | ||||
3566 | } | ||||
3567 | } | ||||
3568 | |||||
3569 | // Sum up members | ||||
3570 | for (const auto *FD : RT->getDecl()->fields()) { | ||||
3571 | if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { | ||||
3572 | if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) | ||||
3573 | .isIndirect()) { | ||||
3574 | NeededInt = NeededSSE = 0; | ||||
3575 | return getIndirectReturnResult(Ty); | ||||
3576 | } | ||||
3577 | } else { | ||||
3578 | unsigned LocalNeededInt, LocalNeededSSE; | ||||
3579 | if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, | ||||
3580 | LocalNeededSSE, true) | ||||
3581 | .isIndirect()) { | ||||
3582 | NeededInt = NeededSSE = 0; | ||||
3583 | return getIndirectReturnResult(Ty); | ||||
3584 | } | ||||
3585 | NeededInt += LocalNeededInt; | ||||
3586 | NeededSSE += LocalNeededSSE; | ||||
3587 | } | ||||
3588 | } | ||||
3589 | |||||
3590 | return ABIArgInfo::getDirect(); | ||||
3591 | } | ||||
3592 | |||||
3593 | ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, | ||||
3594 | unsigned &NeededInt, | ||||
3595 | unsigned &NeededSSE) const { | ||||
3596 | |||||
3597 | NeededInt = 0; | ||||
3598 | NeededSSE = 0; | ||||
3599 | |||||
3600 | return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); | ||||
3601 | } | ||||
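// Illustrative sketch of the recursion above: for a hypothetical regcall
// argument struct S { int a; double b; }, the field walk accumulates
// NeededInt == 1 (for 'a') and NeededSSE == 1 (for 'b'); only if every
// field classifies to registers is the struct passed directly.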
3602 | |||||
3603 | void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
3604 | |||||
3605 | const unsigned CallingConv = FI.getCallingConvention(); | ||||
3606 | // It is possible to force the Win64 calling convention on any x86_64 target | ||||
3607 | // by using __attribute__((ms_abi)). In that case, delegate this call to | ||||
3608 | // WinX86_64ABIInfo::computeInfo to emit Win64-compatible code. | ||||
3609 | if (CallingConv == llvm::CallingConv::Win64) { | ||||
3610 | WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel); | ||||
3611 | Win64ABIInfo.computeInfo(FI); | ||||
3612 | return; | ||||
3613 | } | ||||
3614 | |||||
3615 | bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; | ||||
3616 | |||||
3617 | // Keep track of the number of assigned registers. | ||||
3618 | unsigned FreeIntRegs = IsRegCall ? 11 : 6; | ||||
3619 | unsigned FreeSSERegs = IsRegCall ? 16 : 8; | ||||
3620 | unsigned NeededInt, NeededSSE; | ||||
3621 | |||||
3622 | if (!::classifyReturnType(getCXXABI(), FI, *this)) { | ||||
3623 | if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && | ||||
3624 | !FI.getReturnType()->getTypePtr()->isUnionType()) { | ||||
3625 | FI.getReturnInfo() = | ||||
3626 | classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); | ||||
3627 | if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { | ||||
3628 | FreeIntRegs -= NeededInt; | ||||
3629 | FreeSSERegs -= NeededSSE; | ||||
3630 | } else { | ||||
3631 | FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); | ||||
3632 | } | ||||
3633 | } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) { | ||||
3634 | // A complex long double is returned in memory when the regcall | ||||
3635 | // calling convention is used. | ||||
3636 | const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>(); | ||||
3637 | if (getContext().getCanonicalType(CT->getElementType()) == | ||||
3638 | getContext().LongDoubleTy) | ||||
3639 | FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); | ||||
3640 | } else | ||||
3641 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
3642 | } | ||||
3643 | |||||
3644 | // If the return value is indirect, then the hidden argument is consuming one | ||||
3645 | // integer register. | ||||
3646 | if (FI.getReturnInfo().isIndirect()) | ||||
3647 | --FreeIntRegs; | ||||
3648 | |||||
3649 | // The chain argument effectively gives us another free register. | ||||
3650 | if (FI.isChainCall()) | ||||
3651 | ++FreeIntRegs; | ||||
3652 | |||||
3653 | unsigned NumRequiredArgs = FI.getNumRequiredArgs(); | ||||
3654 | // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers | ||||
3655 | // get assigned (in left-to-right order) for passing as follows... | ||||
3656 | unsigned ArgNo = 0; | ||||
3657 | for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); | ||||
3658 | it != ie; ++it, ++ArgNo) { | ||||
3659 | bool IsNamedArg = ArgNo < NumRequiredArgs; | ||||
3660 | |||||
3661 | if (IsRegCall && it->type->isStructureOrClassType()) | ||||
3662 | it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); | ||||
3663 | else | ||||
3664 | it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, | ||||
3665 | NeededSSE, IsNamedArg); | ||||
3666 | |||||
3667 | // AMD64-ABI 3.2.3p3: If there are no registers available for any | ||||
3668 | // eightbyte of an argument, the whole argument is passed on the | ||||
3669 | // stack. If registers have already been assigned for some | ||||
3670 | // eightbytes of such an argument, the assignments get reverted. | ||||
3671 | if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { | ||||
3672 | FreeIntRegs -= NeededInt; | ||||
3673 | FreeSSERegs -= NeededSSE; | ||||
3674 | } else { | ||||
3675 | it->info = getIndirectResult(it->type, FreeIntRegs); | ||||
3676 | } | ||||
3677 | } | ||||
3678 | } | ||||
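// Illustrative: for a hypothetical void f(long, long, long, long, long,
// long, long), the first six arguments consume %rdi, %rsi, %rdx, %rcx, %r8
// and %r9; the seventh still classifies as Integer, but FreeIntRegs is
// exhausted, so the loop above reverts it to getIndirectResult and it is
// passed on the stack.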
3679 | |||||
3680 | static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, | ||||
3681 | Address VAListAddr, QualType Ty) { | ||||
3682 | Address overflow_arg_area_p = | ||||
3683 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); | ||||
3684 | llvm::Value *overflow_arg_area = | ||||
3685 | CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); | ||||
3686 | |||||
3687 | // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 | ||||
3688 | // byte boundary if alignment needed by type exceeds 8 byte boundary. | ||||
3689 | // It isn't stated explicitly in the standard, but in practice we use | ||||
3690 | // alignment greater than 16 where necessary. | ||||
3691 | CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); | ||||
3692 | if (Align > CharUnits::fromQuantity(8)) { | ||||
3693 | overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, | ||||
3694 | Align); | ||||
3695 | } | ||||
3696 | |||||
3697 | // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. | ||||
3698 | llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); | ||||
3699 | llvm::Value *Res = | ||||
3700 | CGF.Builder.CreateBitCast(overflow_arg_area, | ||||
3701 | llvm::PointerType::getUnqual(LTy)); | ||||
3702 | |||||
3703 | // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: | ||||
3704 | // l->overflow_arg_area + sizeof(type). | ||||
3705 | // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to | ||||
3706 | // an 8 byte boundary. | ||||
3707 | |||||
3708 | uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; | ||||
3709 | llvm::Value *Offset = | ||||
3710 | llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); | ||||
3711 | overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, | ||||
3712 | "overflow_arg_area.next"); | ||||
3713 | CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); | ||||
3714 | |||||
3715 | // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. | ||||
3716 | return Address(Res, Align); | ||||
3717 | } | ||||
3718 | |||||
3719 | Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
3720 | QualType Ty) const { | ||||
3721 | // Assume that va_list type is correct; should be pointer to LLVM type: | ||||
3722 | // struct { | ||||
3723 | // i32 gp_offset; | ||||
3724 | // i32 fp_offset; | ||||
3725 | // i8* overflow_arg_area; | ||||
3726 | // i8* reg_save_area; | ||||
3727 | // }; | ||||
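// For orientation (illustrative): gp_offset counts bytes into the register
// save area reserved for %rdi..%r9 (0..48) and fp_offset the bytes for
// %xmm0..%xmm7 (48..176); Steps 3 and 5 below test and bump exactly these
// two fields.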
3728 | unsigned neededInt, neededSSE; | ||||
3729 | |||||
3730 | Ty = getContext().getCanonicalType(Ty); | ||||
3731 | ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, | ||||
| |||||
3732 | /*isNamedArg*/false); | ||||
3733 | |||||
3734 | // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed | ||||
3735 | // in the registers. If not go to step 7. | ||||
3736 | if (!neededInt && !neededSSE) | ||||
3737 | return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); | ||||
3738 | |||||
3739 | // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of | ||||
3740 | // general purpose registers needed to pass type and num_fp to hold | ||||
3741 | // the number of floating point registers needed. | ||||
3742 | |||||
3743 | // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into | ||||
3744 | // registers. In the case: l->gp_offset > 48 - num_gp * 8 or | ||||
3745 | // l->fp_offset > 304 - num_fp * 16 go to step 7. | ||||
3746 | // | ||||
3747 | // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of | ||||
3748 | // register save space. | ||||
3749 | |||||
3750 | llvm::Value *InRegs = nullptr; | ||||
3751 | Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); | ||||
3752 | llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; | ||||
3753 | if (neededInt) { | ||||
3754 | gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); | ||||
3755 | gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); | ||||
3756 | InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); | ||||
3757 | InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); | ||||
3758 | } | ||||
3759 | |||||
3760 | if (neededSSE) { | ||||
3761 | fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); | ||||
3762 | fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); | ||||
3763 | llvm::Value *FitsInFP = | ||||
3764 | llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); | ||||
3765 | FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); | ||||
3766 | InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; | ||||
3767 | } | ||||
3768 | |||||
3769 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); | ||||
3770 | llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); | ||||
3771 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); | ||||
3772 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); | ||||
3773 | |||||
3774 | // Emit code to load the value if it was passed in registers. | ||||
3775 | |||||
3776 | CGF.EmitBlock(InRegBlock); | ||||
3777 | |||||
3778 | // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with | ||||
3779 | // an offset of l->gp_offset and/or l->fp_offset. This may require | ||||
3780 | // copying to a temporary location in case the parameter is passed | ||||
3781 | // in different register classes or requires an alignment greater | ||||
3782 | // than 8 for general purpose registers and 16 for XMM registers. | ||||
3783 | // | ||||
3784 | // FIXME: This really results in shameful code when we end up needing to | ||||
3785 | // collect arguments from different places; often what should result in a | ||||
3786 | // simple assembling of a structure from scattered addresses has many more | ||||
3787 | // loads than necessary. Can we clean this up? | ||||
3788 | llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); | ||||
3789 | llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( | ||||
3790 | CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area"); | ||||
3791 | |||||
3792 | Address RegAddr = Address::invalid(); | ||||
3793 | if (neededInt && neededSSE) { | ||||
3794 | // FIXME: Cleanup. | ||||
3795 | assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); | ||||
3796 | llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); | ||||
3797 | Address Tmp = CGF.CreateMemTemp(Ty); | ||||
3798 | Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); | ||||
3799 | assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); | ||||
3800 | llvm::Type *TyLo = ST->getElementType(0); | ||||
3801 | llvm::Type *TyHi = ST->getElementType(1); | ||||
3802 | assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && | ||||
3803 | "Unexpected ABI info for mixed regs"); | ||||
3804 | llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); | ||||
3805 | llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); | ||||
3806 | llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); | ||||
3807 | llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); | ||||
3808 | llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; | ||||
3809 | llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; | ||||
3810 | |||||
3811 | // Copy the first element. | ||||
3812 | // FIXME: Our choice of alignment here and below is probably pessimistic. | ||||
3813 | llvm::Value *V = CGF.Builder.CreateAlignedLoad( | ||||
3814 | TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), | ||||
3815 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); | ||||
3816 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); | ||||
3817 | |||||
3818 | // Copy the second element. | ||||
3819 | V = CGF.Builder.CreateAlignedLoad( | ||||
3820 | TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), | ||||
3821 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); | ||||
3822 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); | ||||
3823 | |||||
3824 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); | ||||
3825 | } else if (neededInt) { | ||||
3826 | RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), | ||||
3827 | CharUnits::fromQuantity(8)); | ||||
3828 | RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); | ||||
3829 | |||||
3830 | // Copy to a temporary if necessary to ensure the appropriate alignment. | ||||
3831 | std::pair<CharUnits, CharUnits> SizeAlign = | ||||
3832 | getContext().getTypeInfoInChars(Ty); | ||||
3833 | uint64_t TySize = SizeAlign.first.getQuantity(); | ||||
3834 | CharUnits TyAlign = SizeAlign.second; | ||||
3835 | |||||
3836 | // Copy into a temporary if the type is more aligned than the | ||||
3837 | // register save area. | ||||
3838 | if (TyAlign.getQuantity() > 8) { | ||||
3839 | Address Tmp = CGF.CreateMemTemp(Ty); | ||||
3840 | CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); | ||||
3841 | RegAddr = Tmp; | ||||
3842 | } | ||||
3843 | |||||
3844 | } else if (neededSSE == 1) { | ||||
3845 | RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), | ||||
3846 | CharUnits::fromQuantity(16)); | ||||
3847 | RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); | ||||
3848 | } else { | ||||
3849 | assert(neededSSE == 2 && "Invalid number of needed registers!"); | ||||
3850 | // SSE registers are spaced 16 bytes apart in the register save | ||||
3851 | // area, we need to collect the two eightbytes together. | ||||
3852 | // The ABI isn't explicit about this, but it seems reasonable | ||||
3853 | // to assume that the slots are 16-byte aligned, since the stack is | ||||
3854 | // naturally 16-byte aligned and the prologue is expected to store | ||||
3855 | // all the SSE registers to the RSA. | ||||
3856 | Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), | ||||
3857 | CharUnits::fromQuantity(16)); | ||||
3858 | Address RegAddrHi = | ||||
3859 | CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, | ||||
3860 | CharUnits::fromQuantity(16)); | ||||
3861 | llvm::Type *ST = AI.canHaveCoerceToType() | ||||
3862 | ? AI.getCoerceToType() | ||||
3863 | : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy); | ||||
3864 | llvm::Value *V; | ||||
3865 | Address Tmp = CGF.CreateMemTemp(Ty); | ||||
3866 | Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); | ||||
3867 | V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( | ||||
3868 | RegAddrLo, ST->getStructElementType(0))); | ||||
3869 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); | ||||
3870 | V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( | ||||
3871 | RegAddrHi, ST->getStructElementType(1))); | ||||
3872 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); | ||||
3873 | |||||
3874 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); | ||||
3875 | } | ||||
3876 | |||||
3877 | // AMD64-ABI 3.5.7p5: Step 5. Set: | ||||
3878 | // l->gp_offset = l->gp_offset + num_gp * 8 | ||||
3879 | // l->fp_offset = l->fp_offset + num_fp * 16. | ||||
3880 | if (neededInt) { | ||||
3881 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); | ||||
3882 | CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), | ||||
3883 | gp_offset_p); | ||||
3884 | } | ||||
3885 | if (neededSSE) { | ||||
3886 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); | ||||
3887 | CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), | ||||
3888 | fp_offset_p); | ||||
3889 | } | ||||
3890 | CGF.EmitBranch(ContBlock); | ||||
3891 | |||||
3892 | // Emit code to load the value if it was passed in memory. | ||||
3893 | |||||
3894 | CGF.EmitBlock(InMemBlock); | ||||
3895 | Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); | ||||
3896 | |||||
3897 | // Return the appropriate result. | ||||
3898 | |||||
3899 | CGF.EmitBlock(ContBlock); | ||||
3900 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, | ||||
3901 | "vaarg.addr"); | ||||
3902 | return ResAddr; | ||||
3903 | } | ||||
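// Illustrative: for va_arg(ap, double) this emits a branch on
// fp_offset <= 160 (176 minus one 16-byte slot), loads from
// reg_save_area + fp_offset on the in-register path, bumps fp_offset by 16,
// and merges with the overflow path through the PHI above.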
3904 | |||||
3905 | Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
3906 | QualType Ty) const { | ||||
3907 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, | ||||
3908 | CGF.getContext().getTypeInfoInChars(Ty), | ||||
3909 | CharUnits::fromQuantity(8), | ||||
3910 | /*allowHigherAlign*/ false); | ||||
3911 | } | ||||
3912 | |||||
3913 | ABIArgInfo | ||||
3914 | WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, | ||||
3915 | const ABIArgInfo ¤t) const { | ||||
3916 | // Assumes vectorCall calling convention. | ||||
3917 | const Type *Base = nullptr; | ||||
3918 | uint64_t NumElts = 0; | ||||
3919 | |||||
3920 | if (!Ty->isBuiltinType() && !Ty->isVectorType() && | ||||
3921 | isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) { | ||||
3922 | FreeSSERegs -= NumElts; | ||||
3923 | return getDirectX86Hva(); | ||||
3924 | } | ||||
3925 | return current; | ||||
3926 | } | ||||
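// Illustrative: a hypothetical HVA such as struct V4 { __m128 v[4]; } that
// the first classification pass left indirect can be flipped back to a
// direct in-register HVA here, once four free XMM registers are known to
// remain.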
3927 | |||||
3928 | ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, | ||||
3929 | bool IsReturnType, bool IsVectorCall, | ||||
3930 | bool IsRegCall) const { | ||||
3931 | |||||
3932 | if (Ty->isVoidType()) | ||||
3933 | return ABIArgInfo::getIgnore(); | ||||
3934 | |||||
3935 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
3936 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
3937 | |||||
3938 | TypeInfo Info = getContext().getTypeInfo(Ty); | ||||
3939 | uint64_t Width = Info.Width; | ||||
3940 | CharUnits Align = getContext().toCharUnitsFromBits(Info.Align); | ||||
3941 | |||||
3942 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
3943 | if (RT) { | ||||
3944 | if (!IsReturnType) { | ||||
3945 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) | ||||
3946 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
3947 | } | ||||
3948 | |||||
3949 | if (RT->getDecl()->hasFlexibleArrayMember()) | ||||
3950 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
3951 | |||||
3952 | } | ||||
3953 | |||||
3954 | const Type *Base = nullptr; | ||||
3955 | uint64_t NumElts = 0; | ||||
3956 | // vectorcall adds the concept of a homogeneous vector aggregate, similar to | ||||
3957 | // other targets. | ||||
3958 | if ((IsVectorCall || IsRegCall) && | ||||
3959 | isHomogeneousAggregate(Ty, Base, NumElts)) { | ||||
3960 | if (IsRegCall) { | ||||
3961 | if (FreeSSERegs >= NumElts) { | ||||
3962 | FreeSSERegs -= NumElts; | ||||
3963 | if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) | ||||
3964 | return ABIArgInfo::getDirect(); | ||||
3965 | return ABIArgInfo::getExpand(); | ||||
3966 | } | ||||
3967 | return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); | ||||
3968 | } else if (IsVectorCall) { | ||||
3969 | if (FreeSSERegs >= NumElts && | ||||
3970 | (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { | ||||
3971 | FreeSSERegs -= NumElts; | ||||
3972 | return ABIArgInfo::getDirect(); | ||||
3973 | } else if (IsReturnType) { | ||||
3974 | return ABIArgInfo::getExpand(); | ||||
3975 | } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { | ||||
3976 | // HVAs are delayed and reclassified in the 2nd step. | ||||
3977 | return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); | ||||
3978 | } | ||||
3979 | } | ||||
3980 | } | ||||
3981 | |||||
3982 | if (Ty->isMemberPointerType()) { | ||||
3983 | // If the member pointer is represented by an LLVM int or ptr, pass it | ||||
3984 | // directly. | ||||
3985 | llvm::Type *LLTy = CGT.ConvertType(Ty); | ||||
3986 | if (LLTy->isPointerTy() || LLTy->isIntegerTy()) | ||||
3987 | return ABIArgInfo::getDirect(); | ||||
3988 | } | ||||
3989 | |||||
3990 | if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { | ||||
3991 | // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is | ||||
3992 | // not 1, 2, 4, or 8 bytes, must be passed by reference." | ||||
3993 | if (Width > 64 || !llvm::isPowerOf2_64(Width)) | ||||
3994 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
3995 | |||||
3996 | // Otherwise, coerce it to a small integer. | ||||
3997 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); | ||||
3998 | } | ||||
3999 | |||||
4000 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { | ||||
4001 | switch (BT->getKind()) { | ||||
4002 | case BuiltinType::Bool: | ||||
4003 | // Bool is always extended per the ABI; other builtin types are not | ||||
4004 | // extended. | ||||
4005 | return ABIArgInfo::getExtend(Ty); | ||||
4006 | |||||
4007 | case BuiltinType::LongDouble: | ||||
4008 | // Mingw64 GCC uses the old 80-bit extended precision floating point | ||||
4009 | // unit; it passes long doubles indirectly, through memory. | ||||
4010 | if (IsMingw64) { | ||||
4011 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); | ||||
4012 | if (LDF == &llvm::APFloat::x87DoubleExtended()) | ||||
4013 | return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); | ||||
4014 | } | ||||
4015 | break; | ||||
4016 | |||||
4017 | case BuiltinType::Int128: | ||||
4018 | case BuiltinType::UInt128: | ||||
4019 | // If it's a parameter type, the normal ABI rule is that arguments larger | ||||
4020 | // than 8 bytes are passed indirectly. GCC follows it. We follow it too, | ||||
4021 | // even though it isn't particularly efficient. | ||||
4022 | if (!IsReturnType) | ||||
4023 | return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); | ||||
4024 | |||||
4025 | // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that. | ||||
4026 | // Clang matches them for compatibility. | ||||
4027 | return ABIArgInfo::getDirect( | ||||
4028 | llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2)); | ||||
4029 | |||||
4030 | default: | ||||
4031 | break; | ||||
4032 | } | ||||
4033 | } | ||||
4034 | |||||
4035 | return ABIArgInfo::getDirect(); | ||||
4036 | } | ||||
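// Worked example (illustrative, using hypothetical types): a 16-byte
//   struct S { int a, b, c, d; };
// fails the Width > 64 check and is passed by reference, while an 8-byte
// struct { int a, b; } is a power-of-two size and is coerced to a single
// i64 passed in a register, per the MS x64 rule quoted above.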
4037 | |||||
4038 | void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, | ||||
4039 | unsigned FreeSSERegs, | ||||
4040 | bool IsVectorCall, | ||||
4041 | bool IsRegCall) const { | ||||
4042 | unsigned Count = 0; | ||||
4043 | for (auto &I : FI.arguments()) { | ||||
4044 | // Vectorcall on x64 only permits the first 6 arguments to be passed | ||||
4045 | // in XMM/YMM registers. | ||||
4046 | if (Count < VectorcallMaxParamNumAsReg) | ||||
4047 | I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); | ||||
4048 | else { | ||||
4049 | // Since these cannot be passed in registers, pretend no registers | ||||
4050 | // are left. | ||||
4051 | unsigned ZeroSSERegsAvail = 0; | ||||
4052 | I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false, | ||||
4053 | IsVectorCall, IsRegCall); | ||||
4054 | } | ||||
4055 | ++Count; | ||||
4056 | } | ||||
4057 | |||||
4058 | for (auto &I : FI.arguments()) { | ||||
4059 | I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info); | ||||
4060 | } | ||||
4061 | } | ||||
4062 | |||||
4063 | void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
4064 | const unsigned CC = FI.getCallingConvention(); | ||||
4065 | bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall; | ||||
4066 | bool IsRegCall = CC == llvm::CallingConv::X86_RegCall; | ||||
4067 | |||||
4068 | // If __attribute__((sysv_abi)) is in use, use the SysV argument | ||||
4069 | // classification rules. | ||||
4070 | if (CC == llvm::CallingConv::X86_64_SysV) { | ||||
4071 | X86_64ABIInfo SysVABIInfo(CGT, AVXLevel); | ||||
4072 | SysVABIInfo.computeInfo(FI); | ||||
4073 | return; | ||||
4074 | } | ||||
4075 | |||||
4076 | unsigned FreeSSERegs = 0; | ||||
4077 | if (IsVectorCall) { | ||||
4078 | // We can use up to 4 SSE return registers with vectorcall. | ||||
4079 | FreeSSERegs = 4; | ||||
4080 | } else if (IsRegCall) { | ||||
4081 | // RegCall gives us 16 SSE registers. | ||||
4082 | FreeSSERegs = 16; | ||||
4083 | } | ||||
4084 | |||||
4085 | if (!getCXXABI().classifyReturnType(FI)) | ||||
4086 | FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, | ||||
4087 | IsVectorCall, IsRegCall); | ||||
4088 | |||||
4089 | if (IsVectorCall) { | ||||
4090 | // We can use up to 6 SSE register parameters with vectorcall. | ||||
4091 | FreeSSERegs = 6; | ||||
4092 | } else if (IsRegCall) { | ||||
4093 | // RegCall gives us 16 SSE registers, we can reuse the return registers. | ||||
4094 | FreeSSERegs = 16; | ||||
4095 | } | ||||
4096 | |||||
4097 | if (IsVectorCall) { | ||||
4098 | computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall); | ||||
4099 | } else { | ||||
4100 | for (auto &I : FI.arguments()) | ||||
4101 | I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); | ||||
4102 | } | ||||
4103 | |||||
4104 | } | ||||
4105 | |||||
4106 | Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
4107 | QualType Ty) const { | ||||
4108 | |||||
4109 | bool IsIndirect = false; | ||||
4110 | |||||
4111 | // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is | ||||
4112 | // not 1, 2, 4, or 8 bytes, must be passed by reference." | ||||
4113 | if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) { | ||||
4114 | uint64_t Width = getContext().getTypeSize(Ty); | ||||
4115 | IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); | ||||
4116 | } | ||||
4117 | |||||
4118 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, | ||||
4119 | CGF.getContext().getTypeInfoInChars(Ty), | ||||
4120 | CharUnits::fromQuantity(8), | ||||
4121 | /*allowHigherAlign*/ false); | ||||
4122 | } | ||||
4123 | |||||
4124 | // PowerPC-32 | ||||
4125 | namespace { | ||||
4126 | /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. | ||||
4127 | class PPC32_SVR4_ABIInfo : public DefaultABIInfo { | ||||
4128 | bool IsSoftFloatABI; | ||||
4129 | |||||
4130 | CharUnits getParamTypeAlignment(QualType Ty) const; | ||||
4131 | |||||
4132 | public: | ||||
4133 | PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI) | ||||
4134 | : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {} | ||||
4135 | |||||
4136 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
4137 | QualType Ty) const override; | ||||
4138 | }; | ||||
4139 | |||||
4140 | class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
4141 | public: | ||||
4142 | PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI) | ||||
4143 | : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {} | ||||
4144 | |||||
4145 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
4146 | // This is recovered from gcc output. | ||||
4147 | return 1; // r1 is the dedicated stack pointer | ||||
4148 | } | ||||
4149 | |||||
4150 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
4151 | llvm::Value *Address) const override; | ||||
4152 | }; | ||||
4153 | } | ||||
4154 | |||||
4155 | CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { | ||||
4156 | // Complex types are passed just like their elements | ||||
4157 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) | ||||
4158 | Ty = CTy->getElementType(); | ||||
4159 | |||||
4160 | if (Ty->isVectorType()) | ||||
4161 | return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 | ||||
4162 | : 4); | ||||
4163 | |||||
4164 | // For single-element float/vector structs, we consider the whole type | ||||
4165 | // to have the same alignment requirements as its single element. | ||||
4166 | const Type *AlignTy = nullptr; | ||||
4167 | if (const Type *EltType = isSingleElementStruct(Ty, getContext())) { | ||||
4168 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); | ||||
4169 | if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || | ||||
4170 | (BT && BT->isFloatingPoint())) | ||||
4171 | AlignTy = EltType; | ||||
4172 | } | ||||
4173 | |||||
4174 | if (AlignTy) | ||||
4175 | return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4); | ||||
4176 | return CharUnits::fromQuantity(4); | ||||
4177 | } | ||||
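// Illustrative: a 128-bit vector, or a single-element struct wrapping one,
// gets 16-byte alignment in the parameter area; everything else here gets 4.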
4178 | |||||
4179 | // TODO: this implementation is now likely redundant with | ||||
4180 | // DefaultABIInfo::EmitVAArg. | ||||
4181 | Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, | ||||
4182 | QualType Ty) const { | ||||
4183 | if (getTarget().getTriple().isOSDarwin()) { | ||||
4184 | auto TI = getContext().getTypeInfoInChars(Ty); | ||||
4185 | TI.second = getParamTypeAlignment(Ty); | ||||
4186 | |||||
4187 | CharUnits SlotSize = CharUnits::fromQuantity(4); | ||||
4188 | return emitVoidPtrVAArg(CGF, VAList, Ty, | ||||
4189 | classifyArgumentType(Ty).isIndirect(), TI, SlotSize, | ||||
4190 | /*AllowHigherAlign=*/true); | ||||
4191 | } | ||||
4192 | |||||
4193 | const unsigned OverflowLimit = 8; | ||||
4194 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { | ||||
4195 | // TODO: Implement this. For now ignore. | ||||
4196 | (void)CTy; | ||||
4197 | return Address::invalid(); // FIXME? | ||||
4198 | } | ||||
4199 | |||||
4200 | // struct __va_list_tag { | ||||
4201 | // unsigned char gpr; | ||||
4202 | // unsigned char fpr; | ||||
4203 | // unsigned short reserved; | ||||
4204 | // void *overflow_arg_area; | ||||
4205 | // void *reg_save_area; | ||||
4206 | // }; | ||||
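// Note (illustrative): gpr and fpr above count registers already consumed,
// not bytes; the OverflowLimit of 8 below matches the eight GPRs (r3..r10)
// and eight FPRs (f1..f8) that the SVR4 ABI dedicates to argument passing.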
4207 | |||||
4208 | bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; | ||||
4209 | bool isInt = | ||||
4210 | Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); | ||||
4211 | bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; | ||||
4212 | |||||
4213 | // All aggregates are passed indirectly? That doesn't seem consistent | ||||
4214 | // with the argument-lowering code. | ||||
4215 | bool isIndirect = Ty->isAggregateType(); | ||||
4216 | |||||
4217 | CGBuilderTy &Builder = CGF.Builder; | ||||
4218 | |||||
4219 | // The calling convention either uses 1-2 GPRs or 1 FPR. | ||||
4220 | Address NumRegsAddr = Address::invalid(); | ||||
4221 | if (isInt || IsSoftFloatABI) { | ||||
4222 | NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr"); | ||||
4223 | } else { | ||||
4224 | NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr"); | ||||
4225 | } | ||||
4226 | |||||
4227 | llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); | ||||
4228 | |||||
4229 | // "Align" the register count when TY is i64. | ||||
4230 | if (isI64 || (isF64 && IsSoftFloatABI)) { | ||||
4231 | NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); | ||||
4232 | NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); | ||||
4233 | } | ||||
4234 | |||||
4235 | llvm::Value *CC = | ||||
4236 | Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); | ||||
4237 | |||||
4238 | llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); | ||||
4239 | llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); | ||||
4240 | llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); | ||||
4241 | |||||
4242 | Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); | ||||
4243 | |||||
4244 | llvm::Type *DirectTy = CGF.ConvertType(Ty); | ||||
4245 | if (isIndirect) DirectTy = DirectTy->getPointerTo(0); | ||||
4246 | |||||
4247 | // Case 1: consume registers. | ||||
4248 | Address RegAddr = Address::invalid(); | ||||
4249 | { | ||||
4250 | CGF.EmitBlock(UsingRegs); | ||||
4251 | |||||
4252 | Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4); | ||||
4253 | RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), | ||||
4254 | CharUnits::fromQuantity(8)); | ||||
4255 | assert(RegAddr.getElementType() == CGF.Int8Ty); | ||||
4256 | |||||
4257 | // Floating-point registers start after the general-purpose registers. | ||||
4258 | if (!(isInt || IsSoftFloatABI)) { | ||||
4259 | RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, | ||||
4260 | CharUnits::fromQuantity(32)); | ||||
4261 | } | ||||
4262 | |||||
4263 | // Get the address of the saved value by scaling the number of | ||||
4264 | // registers we've used by the size of each register slot. | ||||
4265 | CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8); | ||||
4266 | llvm::Value *RegOffset = | ||||
4267 | Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); | ||||
4268 | RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, | ||||
4269 | RegAddr.getPointer(), RegOffset), | ||||
4270 | RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); | ||||
4271 | RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); | ||||
4272 | |||||
4273 | // Increase the used-register count. | ||||
4274 | NumRegs = | ||||
4275 | Builder.CreateAdd(NumRegs, | ||||
4276 | Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); | ||||
4277 | Builder.CreateStore(NumRegs, NumRegsAddr); | ||||
4278 | |||||
4279 | CGF.EmitBranch(Cont); | ||||
4280 | } | ||||
4281 | |||||
4282 | // Case 2: consume space in the overflow area. | ||||
4283 | Address MemAddr = Address::invalid(); | ||||
4284 | { | ||||
4285 | CGF.EmitBlock(UsingOverflow); | ||||
4286 | |||||
4287 | Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr); | ||||
4288 | |||||
4289 | // Everything in the overflow area is rounded up to a size of at least 4. | ||||
4290 | CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); | ||||
4291 | |||||
4292 | CharUnits Size; | ||||
4293 | if (!isIndirect) { | ||||
4294 | auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); | ||||
4295 | Size = TypeInfo.first.alignTo(OverflowAreaAlign); | ||||
4296 | } else { | ||||
4297 | Size = CGF.getPointerSize(); | ||||
4298 | } | ||||
4299 | |||||
4300 | Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3); | ||||
4301 | Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), | ||||
4302 | OverflowAreaAlign); | ||||
4303 | // Round up address of argument to alignment | ||||
4304 | CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); | ||||
4305 | if (Align > OverflowAreaAlign) { | ||||
4306 | llvm::Value *Ptr = OverflowArea.getPointer(); | ||||
4307 | OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), | ||||
4308 | Align); | ||||
4309 | } | ||||
4310 | |||||
4311 | MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); | ||||
4312 | |||||
4313 | // Increase the overflow area. | ||||
4314 | OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); | ||||
4315 | Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); | ||||
4316 | CGF.EmitBranch(Cont); | ||||
4317 | } | ||||
4318 | |||||
4319 | CGF.EmitBlock(Cont); | ||||
4320 | |||||
4321 | // Merge the cases with a phi. | ||||
4322 | Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, | ||||
4323 | "vaarg.addr"); | ||||
4324 | |||||
4325 | // Load the pointer if the argument was passed indirectly. | ||||
4326 | if (isIndirect) { | ||||
4327 | Result = Address(Builder.CreateLoad(Result, "aggr"), | ||||
4328 | getContext().getTypeAlignInChars(Ty)); | ||||
4329 | } | ||||
4330 | |||||
4331 | return Result; | ||||
4332 | } | ||||
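// Illustrative: va_arg of a 64-bit integer first rounds NumRegs up to an
// even value (the GPR-pair "alignment" above) and then consumes two
// registers; once the count reaches OverflowLimit (8), the argument is
// fetched from the overflow area instead.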
4333 | |||||
4334 | bool | ||||
4335 | PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
4336 | llvm::Value *Address) const { | ||||
4337 | // This is calculated from the LLVM and GCC tables and verified | ||||
4338 | // against gcc output. AFAIK all ABIs use the same encoding. | ||||
4339 | |||||
4340 | CodeGen::CGBuilderTy &Builder = CGF.Builder; | ||||
4341 | |||||
4342 | llvm::IntegerType *i8 = CGF.Int8Ty; | ||||
4343 | llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); | ||||
4344 | llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); | ||||
4345 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); | ||||
4346 | |||||
4347 | // 0-31: r0-31, the 4-byte general-purpose registers | ||||
4348 | AssignToArrayRange(Builder, Address, Four8, 0, 31); | ||||
4349 | |||||
4350 | // 32-63: fp0-31, the 8-byte floating-point registers | ||||
4351 | AssignToArrayRange(Builder, Address, Eight8, 32, 63); | ||||
4352 | |||||
4353 | // 64-76 are various 4-byte special-purpose registers: | ||||
4354 | // 64: mq | ||||
4355 | // 65: lr | ||||
4356 | // 66: ctr | ||||
4357 | // 67: ap | ||||
4358 | // 68-75 cr0-7 | ||||
4359 | // 76: xer | ||||
4360 | AssignToArrayRange(Builder, Address, Four8, 64, 76); | ||||
4361 | |||||
4362 | // 77-108: v0-31, the 16-byte vector registers | ||||
4363 | AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); | ||||
4364 | |||||
4365 | // 109: vrsave | ||||
4366 | // 110: vscr | ||||
4367 | // 111: spe_acc | ||||
4368 | // 112: spefscr | ||||
4369 | // 113: sfp | ||||
4370 | AssignToArrayRange(Builder, Address, Four8, 109, 113); | ||||
4371 | |||||
4372 | return false; | ||||
4373 | } | ||||
4374 | |||||
4375 | // PowerPC-64 | ||||
4376 | |||||
4377 | namespace { | ||||
4378 | /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. | ||||
4379 | class PPC64_SVR4_ABIInfo : public SwiftABIInfo { | ||||
4380 | public: | ||||
4381 | enum ABIKind { | ||||
4382 | ELFv1 = 0, | ||||
4383 | ELFv2 | ||||
4384 | }; | ||||
4385 | |||||
4386 | private: | ||||
4387 | static const unsigned GPRBits = 64; | ||||
4388 | ABIKind Kind; | ||||
4389 | bool HasQPX; | ||||
4390 | bool IsSoftFloatABI; | ||||
4391 | |||||
4392 | // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and | ||||
4393 | // will be passed in a QPX register. | ||||
4394 | bool IsQPXVectorTy(const Type *Ty) const { | ||||
4395 | if (!HasQPX) | ||||
4396 | return false; | ||||
4397 | |||||
4398 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
4399 | unsigned NumElements = VT->getNumElements(); | ||||
4400 | if (NumElements == 1) | ||||
4401 | return false; | ||||
4402 | |||||
4403 | if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { | ||||
4404 | if (getContext().getTypeSize(Ty) <= 256) | ||||
4405 | return true; | ||||
4406 | } else if (VT->getElementType()-> | ||||
4407 | isSpecificBuiltinType(BuiltinType::Float)) { | ||||
4408 | if (getContext().getTypeSize(Ty) <= 128) | ||||
4409 | return true; | ||||
4410 | } | ||||
4411 | } | ||||
4412 | |||||
4413 | return false; | ||||
4414 | } | ||||
4415 | |||||
4416 | bool IsQPXVectorTy(QualType Ty) const { | ||||
4417 | return IsQPXVectorTy(Ty.getTypePtr()); | ||||
4418 | } | ||||
4419 | |||||
4420 | public: | ||||
4421 | PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX, | ||||
4422 | bool SoftFloatABI) | ||||
4423 | : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX), | ||||
4424 | IsSoftFloatABI(SoftFloatABI) {} | ||||
4425 | |||||
4426 | bool isPromotableTypeForABI(QualType Ty) const; | ||||
4427 | CharUnits getParamTypeAlignment(QualType Ty) const; | ||||
4428 | |||||
4429 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
4430 | ABIArgInfo classifyArgumentType(QualType Ty) const; | ||||
4431 | |||||
4432 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; | ||||
4433 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, | ||||
4434 | uint64_t Members) const override; | ||||
4435 | |||||
4436 | // TODO: We can add more logic to computeInfo to improve performance. | ||||
4437 | // Example: For aggregate arguments that fit in a register, we could | ||||
4438 | // use getDirectInReg (as is done below for structs containing a single | ||||
4439 | // floating-point value) to avoid pushing them to memory on function | ||||
4440 | // entry. This would require changing the logic in PPCISelLowering | ||||
4441 | // when lowering the parameters in the caller and args in the callee. | ||||
4442 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
4443 | if (!getCXXABI().classifyReturnType(FI)) | ||||
4444 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
4445 | for (auto &I : FI.arguments()) { | ||||
4446 | // We rely on the default argument classification for the most part. | ||||
4447 | // One exception: An aggregate containing a single floating-point | ||||
4448 | // or vector item must be passed in a register if one is available. | ||||
4449 | const Type *T = isSingleElementStruct(I.type, getContext()); | ||||
4450 | if (T) { | ||||
4451 | const BuiltinType *BT = T->getAs<BuiltinType>(); | ||||
4452 | if (IsQPXVectorTy(T) || | ||||
4453 | (T->isVectorType() && getContext().getTypeSize(T) == 128) || | ||||
4454 | (BT && BT->isFloatingPoint())) { | ||||
4455 | QualType QT(T, 0); | ||||
4456 | I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); | ||||
4457 | continue; | ||||
4458 | } | ||||
4459 | } | ||||
4460 | I.info = classifyArgumentType(I.type); | ||||
4461 | } | ||||
4462 | } | ||||
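| ||||| 
| // To make the special case above concrete (our illustration, not original | ||||
| // source): struct { double d; } is collapsed to its lone element by | ||||
| // isSingleElementStruct and passed DirectInReg as a double, rather than | ||||
| // going through classifyArgumentType. | ||||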
4463 | |||||
4464 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
4465 | QualType Ty) const override; | ||||
4466 | |||||
4467 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
4468 | bool asReturnValue) const override { | ||||
4469 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
4470 | } | ||||
4471 | |||||
4472 | bool isSwiftErrorInRegister() const override { | ||||
4473 | return false; | ||||
4474 | } | ||||
4475 | }; | ||||
4476 | |||||
4477 | class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
4478 | |||||
4479 | public: | ||||
4480 | PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, | ||||
4481 | PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX, | ||||
4482 | bool SoftFloatABI) | ||||
4483 | : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX, | ||||
4484 | SoftFloatABI)) {} | ||||
4485 | |||||
4486 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
4487 | // This is recovered from gcc output. | ||||
4488 | return 1; // r1 is the dedicated stack pointer | ||||
4489 | } | ||||
4490 | |||||
4491 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
4492 | llvm::Value *Address) const override; | ||||
4493 | }; | ||||
4494 | |||||
4495 | class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { | ||||
4496 | public: | ||||
4497 | PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} | ||||
4498 | |||||
4499 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
4500 | // This is recovered from gcc output. | ||||
4501 | return 1; // r1 is the dedicated stack pointer | ||||
4502 | } | ||||
4503 | |||||
4504 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
4505 | llvm::Value *Address) const override; | ||||
4506 | }; | ||||
4507 | |||||
4508 | } | ||||
4509 | |||||
4510 | // Return true if the ABI requires Ty to be passed sign- or zero- | ||||
4511 | // extended to 64 bits. | ||||
4512 | bool | ||||
4513 | PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { | ||||
4514 | // Treat an enum type as its underlying type. | ||||
4515 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
4516 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
4517 | |||||
4518 | // Promotable integer types are required to be promoted by the ABI. | ||||
4519 | if (Ty->isPromotableIntegerType()) | ||||
4520 | return true; | ||||
4521 | |||||
4522 | // In addition to the usual promotable integer types, we also need to | ||||
4523 | // extend all 32-bit types, since the ABI requires promotion to 64 bits. | ||||
4524 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) | ||||
4525 | switch (BT->getKind()) { | ||||
4526 | case BuiltinType::Int: | ||||
4527 | case BuiltinType::UInt: | ||||
4528 | return true; | ||||
4529 | default: | ||||
4530 | break; | ||||
4531 | } | ||||
4532 | |||||
4533 | return false; | ||||
4534 | } | ||||
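| ||||| 
| // Illustrative classifications (ours, assuming the 64-bit ELF ABI): | ||||
| //   short s;      // promotable       -> sign-extended to 64 bits | ||||
| //   unsigned u;   // 32-bit UInt      -> zero-extended to 64 bits | ||||
| //   long l;       // already 64 bits  -> passed without extension | ||||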
4535 | |||||
4536 | /// getParamTypeAlignment - Determine the alignment a type requires in the | ||||
4537 | /// parameter save area. Always returns at least 8 bytes. | ||||
4538 | CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { | ||||
4539 | // Complex types are passed just like their elements. | ||||
4540 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) | ||||
4541 | Ty = CTy->getElementType(); | ||||
4542 | |||||
4543 | // Only vector types of size 16 bytes need alignment (larger types are | ||||
4544 | // passed via reference, smaller types are not aligned). | ||||
4545 | if (IsQPXVectorTy(Ty)) { | ||||
4546 | if (getContext().getTypeSize(Ty) > 128) | ||||
4547 | return CharUnits::fromQuantity(32); | ||||
4548 | |||||
4549 | return CharUnits::fromQuantity(16); | ||||
4550 | } else if (Ty->isVectorType()) { | ||||
4551 | return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); | ||||
4552 | } | ||||
4553 | |||||
4554 | // For single-element float/vector structs, we consider the whole type | ||||
4555 | // to have the same alignment requirements as its single element. | ||||
4556 | const Type *AlignAsType = nullptr; | ||||
4557 | const Type *EltType = isSingleElementStruct(Ty, getContext()); | ||||
4558 | if (EltType) { | ||||
4559 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); | ||||
4560 | if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && | ||||
4561 | getContext().getTypeSize(EltType) == 128) || | ||||
4562 | (BT && BT->isFloatingPoint())) | ||||
4563 | AlignAsType = EltType; | ||||
4564 | } | ||||
4565 | |||||
4566 | // Likewise for ELFv2 homogeneous aggregates. | ||||
4567 | const Type *Base = nullptr; | ||||
4568 | uint64_t Members = 0; | ||||
4569 | if (!AlignAsType && Kind == ELFv2 && | ||||
4570 | isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) | ||||
4571 | AlignAsType = Base; | ||||
4572 | |||||
4573 | // With special-case aggregates, only vector base types need extra alignment. | ||||
4574 | if (AlignAsType && IsQPXVectorTy(AlignAsType)) { | ||||
4575 | if (getContext().getTypeSize(AlignAsType) > 128) | ||||
4576 | return CharUnits::fromQuantity(32); | ||||
4577 | |||||
4578 | return CharUnits::fromQuantity(16); | ||||
4579 | } else if (AlignAsType) { | ||||
4580 | return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); | ||||
4581 | } | ||||
4582 | |||||
4583 | // Otherwise, we only need alignment for any aggregate type that | ||||
4584 | // has an alignment requirement of >= 16 bytes. | ||||
4585 | if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { | ||||
4586 | if (HasQPX && getContext().getTypeAlign(Ty) >= 256) | ||||
4587 | return CharUnits::fromQuantity(32); | ||||
4588 | return CharUnits::fromQuantity(16); | ||||
4589 | } | ||||
4590 | |||||
4591 | return CharUnits::fromQuantity(8); | ||||
4592 | } | ||||
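| ||||| 
| // A sketch of the resulting alignments (ours; assumes ELFv2 without QPX): | ||||
| //   vector double v;                    // 16-byte Altivec vector    -> 16 | ||||
| //   struct { vector double v; };        // single-element struct     -> 16 | ||||
| //   struct { double d[3]; };            // homogeneous, base double  -> 8 | ||||
| //   struct { char c; } __attribute__((aligned(16)));  // align >= 16 -> 16 | ||||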
4593 | |||||
4594 | /// isHomogeneousAggregate - Return true if a type is a homogeneous aggregate | ||||
4595 | /// under the target ABI (e.g. ELFv2 or AAPCS64). Base is set to the base | ||||
4596 | /// element type, and Members is set to the number of base elements. | ||||
4597 | bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, | ||||
4598 | uint64_t &Members) const { | ||||
4599 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { | ||||
4600 | uint64_t NElements = AT->getSize().getZExtValue(); | ||||
4601 | if (NElements == 0) | ||||
4602 | return false; | ||||
4603 | if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) | ||||
4604 | return false; | ||||
4605 | Members *= NElements; | ||||
4606 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { | ||||
4607 | const RecordDecl *RD = RT->getDecl(); | ||||
4608 | if (RD->hasFlexibleArrayMember()) | ||||
4609 | return false; | ||||
4610 | |||||
4611 | Members = 0; | ||||
4612 | |||||
4613 | // If this is a C++ record, check the bases first. | ||||
4614 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { | ||||
4615 | for (const auto &I : CXXRD->bases()) { | ||||
4616 | // Ignore empty records. | ||||
4617 | if (isEmptyRecord(getContext(), I.getType(), true)) | ||||
4618 | continue; | ||||
4619 | |||||
4620 | uint64_t FldMembers; | ||||
4621 | if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) | ||||
4622 | return false; | ||||
4623 | |||||
4624 | Members += FldMembers; | ||||
4625 | } | ||||
4626 | } | ||||
4627 | |||||
4628 | for (const auto *FD : RD->fields()) { | ||||
4629 | // Ignore (non-zero arrays of) empty records. | ||||
4630 | QualType FT = FD->getType(); | ||||
4631 | while (const ConstantArrayType *AT = | ||||
4632 | getContext().getAsConstantArrayType(FT)) { | ||||
4633 | if (AT->getSize().getZExtValue() == 0) | ||||
4634 | return false; | ||||
4635 | FT = AT->getElementType(); | ||||
4636 | } | ||||
4637 | if (isEmptyRecord(getContext(), FT, true)) | ||||
4638 | continue; | ||||
4639 | |||||
4640 | // For compatibility with GCC, ignore empty bitfields in C++ mode. | ||||
4641 | if (getContext().getLangOpts().CPlusPlus && | ||||
4642 | FD->isZeroLengthBitField(getContext())) | ||||
4643 | continue; | ||||
4644 | |||||
4645 | uint64_t FldMembers; | ||||
4646 | if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) | ||||
4647 | return false; | ||||
4648 | |||||
4649 | Members = (RD->isUnion() ? | ||||
4650 | std::max(Members, FldMembers) : Members + FldMembers); | ||||
4651 | } | ||||
4652 | |||||
4653 | if (!Base) | ||||
4654 | return false; | ||||
4655 | |||||
4656 | // Ensure there is no padding. | ||||
4657 | if (getContext().getTypeSize(Base) * Members != | ||||
4658 | getContext().getTypeSize(Ty)) | ||||
4659 | return false; | ||||
4660 | } else { | ||||
4661 | Members = 1; | ||||
4662 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { | ||||
4663 | Members = 2; | ||||
4664 | Ty = CT->getElementType(); | ||||
4665 | } | ||||
4666 | |||||
4667 | // Most ABIs only support float, double, and some vector type widths. | ||||
4668 | if (!isHomogeneousAggregateBaseType(Ty)) | ||||
4669 | return false; | ||||
4670 | |||||
4671 | // The base type must be the same for all members. Types that | ||||
4672 | // agree in both total size and mode (float vs. vector) are | ||||
4673 | // treated as being equivalent here. | ||||
4674 | const Type *TyPtr = Ty.getTypePtr(); | ||||
4675 | if (!Base) { | ||||
4676 | Base = TyPtr; | ||||
4677 | // A vector with a non-power-of-2 element count is sized as if widened to | ||||
4678 | // the next power of 2, so explicitly widen Base to match that size. | ||||
4679 | if (const VectorType *VT = Base->getAs<VectorType>()) { | ||||
4680 | QualType EltTy = VT->getElementType(); | ||||
4681 | unsigned NumElements = | ||||
4682 | getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); | ||||
4683 | Base = getContext() | ||||
4684 | .getVectorType(EltTy, NumElements, VT->getVectorKind()) | ||||
4685 | .getTypePtr(); | ||||
4686 | } | ||||
4687 | } | ||||
4688 | |||||
4689 | if (Base->isVectorType() != TyPtr->isVectorType() || | ||||
4690 | getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) | ||||
4691 | return false; | ||||
4692 | } | ||||
4693 | return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); | ||||
4694 | } | ||||
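| ||||| 
| // Worked examples (ours; "HA" = homogeneous aggregate): | ||||
| //   struct A { float x, y; };        // Base = float, Members = 2 -> HA | ||||
| //   struct B { struct A a[2]; };     // flattens to Members = 4   -> HA | ||||
| //   struct C { float x; double y; }; // mixed base sizes -> not an HA | ||||
| // Whether an HA is "small enough" is target-specific; see the overrides. | ||||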
4695 | |||||
4696 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { | ||||
4697 | // Homogeneous aggregates for ELFv2 must have base types of float, | ||||
4698 | // double, long double, or 128-bit vectors. | ||||
4699 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { | ||||
4700 | if (BT->getKind() == BuiltinType::Float || | ||||
4701 | BT->getKind() == BuiltinType::Double || | ||||
4702 | BT->getKind() == BuiltinType::LongDouble || | ||||
4703 | (getContext().getTargetInfo().hasFloat128Type() && | ||||
4704 | (BT->getKind() == BuiltinType::Float128))) { | ||||
4705 | if (IsSoftFloatABI) | ||||
4706 | return false; | ||||
4707 | return true; | ||||
4708 | } | ||||
4709 | } | ||||
4710 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
4711 | if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) | ||||
4712 | return true; | ||||
4713 | } | ||||
4714 | return false; | ||||
4715 | } | ||||
4716 | |||||
4717 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( | ||||
4718 | const Type *Base, uint64_t Members) const { | ||||
4719 | // Vector and fp128 types require one register, other floating point types | ||||
4720 | // require one or two registers depending on their size. | ||||
4721 | uint32_t NumRegs = | ||||
4722 | ((getContext().getTargetInfo().hasFloat128Type() && | ||||
4723 | Base->isFloat128Type()) || | ||||
4724 | Base->isVectorType()) ? 1 | ||||
4725 | : (getContext().getTypeSize(Base) + 63) / 64; | ||||
4726 | |||||
4727 | // Homogeneous Aggregates may occupy at most 8 registers. | ||||
4728 | return Members * NumRegs <= 8; | ||||
4729 | } | ||||
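| ||||| 
| // For instance (assuming an IBM double-double long double): each 16-byte | ||||
| // member needs NumRegs = 2, so at most four such members fit in the | ||||
| // 8-register budget above. | ||||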
4730 | |||||
4731 | ABIArgInfo | ||||
4732 | PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { | ||||
4733 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
4734 | |||||
4735 | if (Ty->isAnyComplexType()) | ||||
4736 | return ABIArgInfo::getDirect(); | ||||
4737 | |||||
4738 | // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) | ||||
4739 | // or via reference (larger than 16 bytes). | ||||
4740 | if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { | ||||
4741 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
4742 | if (Size > 128) | ||||
4743 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
4744 | else if (Size < 128) { | ||||
4745 | llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); | ||||
4746 | return ABIArgInfo::getDirect(CoerceTy); | ||||
4747 | } | ||||
4748 | } | ||||
4749 | |||||
4750 | if (isAggregateTypeForABI(Ty)) { | ||||
4751 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
4752 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
4753 | |||||
4754 | uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); | ||||
4755 | uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); | ||||
4756 | |||||
4757 | // ELFv2 homogeneous aggregates are passed as array types. | ||||
4758 | const Type *Base = nullptr; | ||||
4759 | uint64_t Members = 0; | ||||
4760 | if (Kind == ELFv2 && | ||||
4761 | isHomogeneousAggregate(Ty, Base, Members)) { | ||||
4762 | llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); | ||||
4763 | llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); | ||||
4764 | return ABIArgInfo::getDirect(CoerceTy); | ||||
4765 | } | ||||
4766 | |||||
4767 | // If an aggregate may end up fully in registers, we do not | ||||
4768 | // use the ByVal method, but pass the aggregate as an array. | ||||
4769 | // This is usually beneficial since we avoid forcing the | ||||
4770 | // back-end to store the argument to memory. | ||||
4771 | uint64_t Bits = getContext().getTypeSize(Ty); | ||||
4772 | if (Bits > 0 && Bits <= 8 * GPRBits) { | ||||
4773 | llvm::Type *CoerceTy; | ||||
4774 | |||||
4775 | // Types up to 8 bytes are passed as an integer type (which will be | ||||
4776 | // properly aligned in the argument save area doubleword). | ||||
4777 | if (Bits <= GPRBits) | ||||
4778 | CoerceTy = | ||||
4779 | llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); | ||||
4780 | // Larger types are passed as arrays, with the base type selected | ||||
4781 | // according to the required alignment in the save area. | ||||
4782 | else { | ||||
4783 | uint64_t RegBits = ABIAlign * 8; | ||||
4784 | uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; | ||||
4785 | llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); | ||||
4786 | CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); | ||||
4787 | } | ||||
4788 | |||||
4789 | return ABIArgInfo::getDirect(CoerceTy); | ||||
4790 | } | ||||
4791 | |||||
4792 | // All other aggregates are passed ByVal. | ||||
4793 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), | ||||
4794 | /*ByVal=*/true, | ||||
4795 | /*Realign=*/TyAlign > ABIAlign); | ||||
4796 | } | ||||
4797 | |||||
4798 | return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) | ||||
4799 | : ABIArgInfo::getDirect()); | ||||
4800 | } | ||||
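| ||||| 
| // A few classifications this produces (our sketch, ELFv2): | ||||
| //   struct { float f[4]; }   -> direct [4 x float]  (homogeneous aggregate) | ||||
| //   struct { char c[12]; }   -> direct [2 x i64]    (fits in GPRs) | ||||
| //   struct { char c[100]; }  -> indirect, byval     (larger than 8 GPRs) | ||||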
4801 | |||||
4802 | ABIArgInfo | ||||
4803 | PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { | ||||
4804 | if (RetTy->isVoidType()) | ||||
4805 | return ABIArgInfo::getIgnore(); | ||||
4806 | |||||
4807 | if (RetTy->isAnyComplexType()) | ||||
4808 | return ABIArgInfo::getDirect(); | ||||
4809 | |||||
4810 | // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) | ||||
4811 | // or via reference (larger than 16 bytes). | ||||
4812 | if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { | ||||
4813 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
4814 | if (Size > 128) | ||||
4815 | return getNaturalAlignIndirect(RetTy); | ||||
4816 | else if (Size < 128) { | ||||
4817 | llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); | ||||
4818 | return ABIArgInfo::getDirect(CoerceTy); | ||||
4819 | } | ||||
4820 | } | ||||
4821 | |||||
4822 | if (isAggregateTypeForABI(RetTy)) { | ||||
4823 | // ELFv2 homogeneous aggregates are returned as array types. | ||||
4824 | const Type *Base = nullptr; | ||||
4825 | uint64_t Members = 0; | ||||
4826 | if (Kind == ELFv2 && | ||||
4827 | isHomogeneousAggregate(RetTy, Base, Members)) { | ||||
4828 | llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); | ||||
4829 | llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); | ||||
4830 | return ABIArgInfo::getDirect(CoerceTy); | ||||
4831 | } | ||||
4832 | |||||
4833 | // ELFv2 small aggregates are returned in up to two registers. | ||||
4834 | uint64_t Bits = getContext().getTypeSize(RetTy); | ||||
4835 | if (Kind == ELFv2 && Bits <= 2 * GPRBits) { | ||||
4836 | if (Bits == 0) | ||||
4837 | return ABIArgInfo::getIgnore(); | ||||
4838 | |||||
4839 | llvm::Type *CoerceTy; | ||||
4840 | if (Bits > GPRBits) { | ||||
4841 | CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); | ||||
4842 | CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); | ||||
4843 | } else | ||||
4844 | CoerceTy = | ||||
4845 | llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); | ||||
4846 | return ABIArgInfo::getDirect(CoerceTy); | ||||
4847 | } | ||||
4848 | |||||
4849 | // All other aggregates are returned indirectly. | ||||
4850 | return getNaturalAlignIndirect(RetTy); | ||||
4851 | } | ||||
4852 | |||||
4853 | return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) | ||||
4854 | : ABIArgInfo::getDirect()); | ||||
4855 | } | ||||
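| ||||| 
| // Return-value sketches under ELFv2 (illustrative, ours): | ||||
| //   struct { double x, y; }  -> direct [2 x double]  (homogeneous aggregate) | ||||
| //   struct { char c[10]; }   -> direct { i64, i64 }  (fits in two GPRs) | ||||
| //   struct { char c[24]; }   -> indirect sret        (over two GPRs) | ||||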
4856 | |||||
4857 | // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. | ||||
4858 | Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
4859 | QualType Ty) const { | ||||
4860 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); | ||||
4861 | TypeInfo.second = getParamTypeAlignment(Ty); | ||||
4862 | |||||
4863 | CharUnits SlotSize = CharUnits::fromQuantity(8); | ||||
4864 | |||||
4865 | // If we have a complex type and the base type is smaller than 8 bytes, | ||||
4866 | // the ABI calls for the real and imaginary parts to be right-adjusted | ||||
4867 | // in separate doublewords. However, Clang expects us to produce a | ||||
4868 | // pointer to a structure with the two parts packed tightly. So generate | ||||
4869 | // loads of the real and imaginary parts relative to the va_list pointer, | ||||
4870 | // and store them to a temporary structure. | ||||
4871 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { | ||||
4872 | CharUnits EltSize = TypeInfo.first / 2; | ||||
4873 | if (EltSize < SlotSize) { | ||||
4874 | Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, | ||||
4875 | SlotSize * 2, SlotSize, | ||||
4876 | SlotSize, /*AllowHigher*/ true); | ||||
4877 | |||||
4878 | Address RealAddr = Addr; | ||||
4879 | Address ImagAddr = RealAddr; | ||||
4880 | if (CGF.CGM.getDataLayout().isBigEndian()) { | ||||
4881 | RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, | ||||
4882 | SlotSize - EltSize); | ||||
4883 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, | ||||
4884 | 2 * SlotSize - EltSize); | ||||
4885 | } else { | ||||
4886 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); | ||||
4887 | } | ||||
4888 | |||||
4889 | llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); | ||||
4890 | RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); | ||||
4891 | ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); | ||||
4892 | llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); | ||||
4893 | llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); | ||||
4894 | |||||
4895 | Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); | ||||
4896 | CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), | ||||
4897 | /*init*/ true); | ||||
4898 | return Temp; | ||||
4899 | } | ||||
4900 | } | ||||
4901 | |||||
4902 | // Otherwise, just use the general rule. | ||||
4903 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, | ||||
4904 | TypeInfo, SlotSize, /*AllowHigher*/ true); | ||||
4905 | } | ||||
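| ||||| 
| // For example (our illustration): va_arg(ap, _Complex float) consumes two | ||||
| // doublewords; on big-endian targets each 4-byte part is right-adjusted, so | ||||
| // the loads above start 4 bytes into their respective slots. | ||||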
4906 | |||||
4907 | static bool | ||||
4908 | PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
4909 | llvm::Value *Address) { | ||||
4910 | // This is calculated from the LLVM and GCC tables and verified | ||||
4911 | // against gcc output. AFAIK all ABIs use the same encoding. | ||||
4912 | |||||
4913 | CodeGen::CGBuilderTy &Builder = CGF.Builder; | ||||
4914 | |||||
4915 | llvm::IntegerType *i8 = CGF.Int8Ty; | ||||
4916 | llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); | ||||
4917 | llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); | ||||
4918 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); | ||||
4919 | |||||
4920 | // 0-31: r0-31, the 8-byte general-purpose registers | ||||
4921 | AssignToArrayRange(Builder, Address, Eight8, 0, 31); | ||||
4922 | |||||
4923 | // 32-63: fp0-31, the 8-byte floating-point registers | ||||
4924 | AssignToArrayRange(Builder, Address, Eight8, 32, 63); | ||||
4925 | |||||
4926 | // 64-67 are various 8-byte special-purpose registers: | ||||
4927 | // 64: mq | ||||
4928 | // 65: lr | ||||
4929 | // 66: ctr | ||||
4930 | // 67: ap | ||||
4931 | AssignToArrayRange(Builder, Address, Eight8, 64, 67); | ||||
4932 | |||||
4933 | // 68-76 are various 4-byte special-purpose registers: | ||||
4934 | // 68-75 cr0-7 | ||||
4935 | // 76: xer | ||||
4936 | AssignToArrayRange(Builder, Address, Four8, 68, 76); | ||||
4937 | |||||
4938 | // 77-108: v0-31, the 16-byte vector registers | ||||
4939 | AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); | ||||
4940 | |||||
4941 | // 109: vrsave | ||||
4942 | // 110: vscr | ||||
4943 | // 111: spe_acc | ||||
4944 | // 112: spefscr | ||||
4945 | // 113: sfp | ||||
4946 | // 114: tfhar | ||||
4947 | // 115: tfiar | ||||
4948 | // 116: texasr | ||||
4949 | AssignToArrayRange(Builder, Address, Eight8, 109, 116); | ||||
4950 | |||||
4951 | return false; | ||||
4952 | } | ||||
4953 | |||||
4954 | bool | ||||
4955 | PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( | ||||
4956 | CodeGen::CodeGenFunction &CGF, | ||||
4957 | llvm::Value *Address) const { | ||||
4958 | |||||
4959 | return PPC64_initDwarfEHRegSizeTable(CGF, Address); | ||||
4960 | } | ||||
4961 | |||||
4962 | bool | ||||
4963 | PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
4964 | llvm::Value *Address) const { | ||||
4965 | |||||
4966 | return PPC64_initDwarfEHRegSizeTable(CGF, Address); | ||||
4967 | } | ||||
4968 | |||||
4969 | //===----------------------------------------------------------------------===// | ||||
4970 | // AArch64 ABI Implementation | ||||
4971 | //===----------------------------------------------------------------------===// | ||||
4972 | |||||
4973 | namespace { | ||||
4974 | |||||
4975 | class AArch64ABIInfo : public SwiftABIInfo { | ||||
4976 | public: | ||||
4977 | enum ABIKind { | ||||
4978 | AAPCS = 0, | ||||
4979 | DarwinPCS, | ||||
4980 | Win64 | ||||
4981 | }; | ||||
4982 | |||||
4983 | private: | ||||
4984 | ABIKind Kind; | ||||
4985 | |||||
4986 | public: | ||||
4987 | AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) | ||||
4988 | : SwiftABIInfo(CGT), Kind(Kind) {} | ||||
4989 | |||||
4990 | private: | ||||
4991 | ABIKind getABIKind() const { return Kind; } | ||||
4992 | bool isDarwinPCS() const { return Kind == DarwinPCS; } | ||||
4993 | |||||
4994 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
4995 | ABIArgInfo classifyArgumentType(QualType RetTy) const; | ||||
4996 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; | ||||
4997 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, | ||||
4998 | uint64_t Members) const override; | ||||
4999 | |||||
5000 | bool isIllegalVectorType(QualType Ty) const; | ||||
5001 | |||||
5002 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
5003 | if (!::classifyReturnType(getCXXABI(), FI, *this)) | ||||
5004 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
5005 | |||||
5006 | for (auto &it : FI.arguments()) | ||||
5007 | it.info = classifyArgumentType(it.type); | ||||
5008 | } | ||||
5009 | |||||
5010 | Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, | ||||
5011 | CodeGenFunction &CGF) const; | ||||
5012 | |||||
5013 | Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, | ||||
5014 | CodeGenFunction &CGF) const; | ||||
5015 | |||||
5016 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
5017 | QualType Ty) const override { | ||||
5018 | return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) | ||||
5019 | : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) | ||||
5020 | : EmitAAPCSVAArg(VAListAddr, Ty, CGF); | ||||
5021 | } | ||||
5022 | |||||
5023 | Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
5024 | QualType Ty) const override; | ||||
5025 | |||||
5026 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
5027 | bool asReturnValue) const override { | ||||
5028 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
5029 | } | ||||
5030 | bool isSwiftErrorInRegister() const override { | ||||
5031 | return true; | ||||
5032 | } | ||||
5033 | |||||
5034 | bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, | ||||
5035 | unsigned elts) const override; | ||||
5036 | }; | ||||
5037 | |||||
5038 | class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
5039 | public: | ||||
5040 | AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) | ||||
5041 | : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} | ||||
5042 | |||||
5043 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { | ||||
5044 | return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; | ||||
5045 | } | ||||
5046 | |||||
5047 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
5048 | return 31; | ||||
5049 | } | ||||
5050 | |||||
5051 | bool doesReturnSlotInterfereWithArgs() const override { return false; } | ||||
5052 | |||||
5053 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
5054 | CodeGen::CodeGenModule &CGM) const override { | ||||
5055 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
5056 | if (!FD) | ||||
5057 | return; | ||||
5058 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
5059 | |||||
5060 | auto Kind = CGM.getCodeGenOpts().getSignReturnAddress(); | ||||
5061 | if (Kind != CodeGenOptions::SignReturnAddressScope::None) { | ||||
5062 | Fn->addFnAttr("sign-return-address", | ||||
5063 | Kind == CodeGenOptions::SignReturnAddressScope::All | ||||
5064 | ? "all" | ||||
5065 | : "non-leaf"); | ||||
5066 | |||||
5067 | auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey(); | ||||
5068 | Fn->addFnAttr("sign-return-address-key", | ||||
5069 | Key == CodeGenOptions::SignReturnAddressKeyValue::AKey | ||||
5070 | ? "a_key" | ||||
5071 | : "b_key"); | ||||
5072 | } | ||||
5073 | |||||
5074 | if (CGM.getCodeGenOpts().BranchTargetEnforcement) | ||||
5075 | Fn->addFnAttr("branch-target-enforcement"); | ||||
5076 | } | ||||
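| ||||| 
| // E.g. (illustrative): when return-address signing is enabled for all | ||||
| // functions with the B key, a definition gets the IR attributes | ||||
| // "sign-return-address"="all" and "sign-return-address-key"="b_key". | ||||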
5077 | }; | ||||
5078 | |||||
5079 | class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { | ||||
5080 | public: | ||||
5081 | WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) | ||||
5082 | : AArch64TargetCodeGenInfo(CGT, K) {} | ||||
5083 | |||||
5084 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
5085 | CodeGen::CodeGenModule &CGM) const override; | ||||
5086 | |||||
5087 | void getDependentLibraryOption(llvm::StringRef Lib, | ||||
5088 | llvm::SmallString<24> &Opt) const override { | ||||
5089 | Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); | ||||
5090 | } | ||||
5091 | |||||
5092 | void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, | ||||
5093 | llvm::SmallString<32> &Opt) const override { | ||||
5094 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; | ||||
5095 | } | ||||
5096 | }; | ||||
5097 | |||||
5098 | void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( | ||||
5099 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { | ||||
5100 | AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); | ||||
5101 | if (GV->isDeclaration()) | ||||
5102 | return; | ||||
5103 | addStackProbeTargetAttributes(D, GV, CGM); | ||||
5104 | } | ||||
5105 | } | ||||
5106 | |||||
5107 | ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { | ||||
5108 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
5109 | |||||
5110 | // Handle illegal vector types here. | ||||
5111 | if (isIllegalVectorType(Ty)) { | ||||
5112 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
5113 | // Android promotes <2 x i8> to i16, not i32 | ||||
5114 | if (isAndroid() && (Size <= 16)) { | ||||
5115 | llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); | ||||
5116 | return ABIArgInfo::getDirect(ResType); | ||||
5117 | } | ||||
5118 | if (Size <= 32) { | ||||
5119 | llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); | ||||
5120 | return ABIArgInfo::getDirect(ResType); | ||||
5121 | } | ||||
5122 | if (Size == 64) { | ||||
5123 | llvm::Type *ResType = | ||||
5124 | llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); | ||||
5125 | return ABIArgInfo::getDirect(ResType); | ||||
5126 | } | ||||
5127 | if (Size == 128) { | ||||
5128 | llvm::Type *ResType = | ||||
5129 | llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); | ||||
5130 | return ABIArgInfo::getDirect(ResType); | ||||
5131 | } | ||||
5132 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
5133 | } | ||||
5134 | |||||
5135 | if (!isAggregateTypeForABI(Ty)) { | ||||
5136 | // Treat an enum type as its underlying type. | ||||
5137 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
5138 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
5139 | |||||
5140 | return (Ty->isPromotableIntegerType() && isDarwinPCS() | ||||
5141 | ? ABIArgInfo::getExtend(Ty) | ||||
5142 | : ABIArgInfo::getDirect()); | ||||
5143 | } | ||||
5144 | |||||
5145 | // Structures with either a non-trivial destructor or a non-trivial | ||||
5146 | // copy constructor are always indirect. | ||||
5147 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { | ||||
5148 | return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == | ||||
5149 | CGCXXABI::RAA_DirectInMemory); | ||||
5150 | } | ||||
5151 | |||||
5152 | // Empty records are always ignored on Darwin; elsewhere they are actually | ||||
5153 | // passed in C++ mode for GNU compatibility. | ||||
5154 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
5155 | bool IsEmpty = isEmptyRecord(getContext(), Ty, true); | ||||
5156 | if (IsEmpty || Size == 0) { | ||||
5157 | if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) | ||||
5158 | return ABIArgInfo::getIgnore(); | ||||
5159 | |||||
5160 | // GNU C++ mode (C was handled above). The only argument that gets ignored | ||||
5161 | // is an empty one with size 0. | ||||
5162 | if (IsEmpty && Size == 0) | ||||
5163 | return ABIArgInfo::getIgnore(); | ||||
5164 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); | ||||
5165 | } | ||||
5166 | |||||
5167 | // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. | ||||
5168 | const Type *Base = nullptr; | ||||
5169 | uint64_t Members = 0; | ||||
5170 | if (isHomogeneousAggregate(Ty, Base, Members)) { | ||||
5171 | return ABIArgInfo::getDirect( | ||||
5172 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); | ||||
5173 | } | ||||
5174 | |||||
5175 | // Aggregates <= 16 bytes are passed directly in registers or on the stack. | ||||
5176 | if (Size <= 128) { | ||||
5177 | // On RenderScript, coerce aggregates <= 16 bytes to an integer array of | ||||
5178 | // the same size and alignment. | ||||
5179 | if (getTarget().isRenderScriptTarget()) { | ||||
5180 | return coerceToIntArray(Ty, getContext(), getVMContext()); | ||||
5181 | } | ||||
5182 | unsigned Alignment; | ||||
5183 | if (Kind == AArch64ABIInfo::AAPCS) { | ||||
5184 | Alignment = getContext().getTypeUnadjustedAlign(Ty); | ||||
5185 | Alignment = Alignment < 128 ? 64 : 128; | ||||
5186 | } else { | ||||
5187 | Alignment = getContext().getTypeAlign(Ty); | ||||
5188 | } | ||||
5189 | Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes | ||||
5190 | |||||
5191 | // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment. | ||||
5192 | // For aggregates with 16-byte alignment, we use i128. | ||||
5193 | if (Alignment < 128 && Size == 128) { | ||||
5194 | llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); | ||||
5195 | return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); | ||||
5196 | } | ||||
5197 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); | ||||
5198 | } | ||||
5199 | |||||
5200 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
5201 | } | ||||
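| ||||| 
| // Classification sketches for AAPCS64 (ours; non-RenderScript): | ||||
| //   struct { float v[4]; }  -> direct [4 x float]  (HFA) | ||||
| //   struct { char c[6]; }   -> direct i64          (rounded up to 8 bytes) | ||||
| //   struct { char c[10]; }  -> direct [2 x i64]    (16 bytes, 8-byte align) | ||||
| //   struct { char c[20]; }  -> indirect            (> 16 bytes, not an HFA) | ||||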
5202 | |||||
5203 | ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { | ||||
5204 | if (RetTy->isVoidType()) | ||||
5205 | return ABIArgInfo::getIgnore(); | ||||
5206 | |||||
5207 | // Large vector types should be returned via memory. | ||||
5208 | if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) | ||||
5209 | return getNaturalAlignIndirect(RetTy); | ||||
5210 | |||||
5211 | if (!isAggregateTypeForABI(RetTy)) { | ||||
5212 | // Treat an enum type as its underlying type. | ||||
5213 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
5214 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
5215 | |||||
5216 | return (RetTy->isPromotableIntegerType() && isDarwinPCS() | ||||
5217 | ? ABIArgInfo::getExtend(RetTy) | ||||
5218 | : ABIArgInfo::getDirect()); | ||||
5219 | } | ||||
5220 | |||||
5221 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
5222 | if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) | ||||
5223 | return ABIArgInfo::getIgnore(); | ||||
5224 | |||||
5225 | const Type *Base = nullptr; | ||||
5226 | uint64_t Members = 0; | ||||
5227 | if (isHomogeneousAggregate(RetTy, Base, Members)) | ||||
5228 | // Homogeneous Floating-point Aggregates (HFAs) are returned directly. | ||||
5229 | return ABIArgInfo::getDirect(); | ||||
5230 | |||||
5231 | // Aggregates <= 16 bytes are returned directly in registers or on the stack. | ||||
5232 | if (Size <= 128) { | ||||
5233 | // On RenderScript, coerce aggregates <= 16 bytes to an integer array of | ||||
5234 | // the same size and alignment. | ||||
5235 | if (getTarget().isRenderScriptTarget()) { | ||||
5236 | return coerceToIntArray(RetTy, getContext(), getVMContext()); | ||||
5237 | } | ||||
5238 | unsigned Alignment = getContext().getTypeAlign(RetTy); | ||||
5239 | Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes | ||||
5240 | |||||
5241 | // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment. | ||||
5242 | // For aggregates with 16-byte alignment, we use i128. | ||||
5243 | if (Alignment < 128 && Size == 128) { | ||||
5244 | llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); | ||||
5245 | return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); | ||||
5246 | } | ||||
5247 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); | ||||
5248 | } | ||||
5249 | |||||
5250 | return getNaturalAlignIndirect(RetTy); | ||||
5251 | } | ||||
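| ||||| 
| // E.g. (ours): struct { double x, y; } is an HFA and is returned directly | ||||
| // (in floating-point registers), while a 24-byte non-HFA struct is | ||||
| // returned indirectly via an sret pointer. | ||||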
5252 | |||||
5253 | /// isIllegalVectorType - check whether the vector type is legal for AArch64. | ||||
5254 | bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { | ||||
5255 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
5256 | // Check whether VT is legal. | ||||
5257 | unsigned NumElements = VT->getNumElements(); | ||||
5258 | uint64_t Size = getContext().getTypeSize(VT); | ||||
5259 | // NumElements should be a power of 2. | ||||
5260 | if (!llvm::isPowerOf2_32(NumElements)) | ||||
5261 | return true; | ||||
5262 | return Size != 64 && (Size != 128 || NumElements == 1); | ||||
5263 | } | ||||
5264 | return false; | ||||
5265 | } | ||||
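| ||||| 
| // Examples (ours): <3 x float> (non-power-of-2 elements) and <1 x i128> | ||||
| // are illegal here; <2 x float> (64 bits) and <4 x i32> (128 bits) are | ||||
| // legal and left alone. | ||||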
5266 | |||||
5267 | bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, | ||||
5268 | llvm::Type *eltTy, | ||||
5269 | unsigned elts) const { | ||||
5270 | if (!llvm::isPowerOf2_32(elts)) | ||||
5271 | return false; | ||||
5272 | if (totalSize.getQuantity() != 8 && | ||||
5273 | (totalSize.getQuantity() != 16 || elts == 1)) | ||||
5274 | return false; | ||||
5275 | return true; | ||||
5276 | } | ||||
5277 | |||||
5278 | bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { | ||||
5279 | // Homogeneous aggregates for AAPCS64 must have base types of a floating | ||||
5280 | // point type or a short-vector type. This is the same as the 32-bit ABI, | ||||
5281 | // but with the difference that any floating-point type is allowed, | ||||
5282 | // including __fp16. | ||||
5283 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { | ||||
5284 | if (BT->isFloatingPoint()) | ||||
5285 | return true; | ||||
5286 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
5287 | unsigned VecSize = getContext().getTypeSize(VT); | ||||
5288 | if (VecSize == 64 || VecSize == 128) | ||||
5289 | return true; | ||||
5290 | } | ||||
5291 | return false; | ||||
5292 | } | ||||
5293 | |||||
5294 | bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, | ||||
5295 | uint64_t Members) const { | ||||
5296 | return Members <= 4; | ||||
5297 | } | ||||
5298 | |||||
5299 | Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, | ||||
5300 | QualType Ty, | ||||
5301 | CodeGenFunction &CGF) const { | ||||
5302 | ABIArgInfo AI = classifyArgumentType(Ty); | ||||
5303 | bool IsIndirect = AI.isIndirect(); | ||||
5304 | |||||
5305 | llvm::Type *BaseTy = CGF.ConvertType(Ty); | ||||
5306 | if (IsIndirect) | ||||
5307 | BaseTy = llvm::PointerType::getUnqual(BaseTy); | ||||
5308 | else if (AI.getCoerceToType()) | ||||
5309 | BaseTy = AI.getCoerceToType(); | ||||
5310 | |||||
5311 | unsigned NumRegs = 1; | ||||
5312 | if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { | ||||
5313 | BaseTy = ArrTy->getElementType(); | ||||
5314 | NumRegs = ArrTy->getNumElements(); | ||||
5315 | } | ||||
5316 | bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); | ||||
5317 | |||||
5318 | // The AArch64 va_list type and handling are specified in the Procedure Call | ||||
5319 | // Standard, section B.4: | ||||
5320 | // | ||||
5321 | // struct { | ||||
5322 | // void *__stack; | ||||
5323 | // void *__gr_top; | ||||
5324 | // void *__vr_top; | ||||
5325 | // int __gr_offs; | ||||
5326 | // int __vr_offs; | ||||
5327 | // }; | ||||
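| // | ||||
| // Our reading of AAPCS64 B.4 (an assumption, not stated in this file): | ||||
| // __gr_offs/__vr_offs hold negative byte offsets from __gr_top/__vr_top | ||||
| // while register save-area slots remain, and become >= 0 once arguments | ||||
| // must come from __stack. The branches below rely on that convention. | ||||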
5328 | |||||
5329 | llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); | ||||
5330 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); | ||||
5331 | llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); | ||||
5332 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); | ||||
5333 | |||||
5334 | CharUnits TySize = getContext().getTypeSizeInChars(Ty); | ||||
5335 | CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty); | ||||
5336 | |||||
5337 | Address reg_offs_p = Address::invalid(); | ||||
5338 | llvm::Value *reg_offs = nullptr; | ||||
5339 | int reg_top_index; | ||||
5340 | int RegSize = IsIndirect ? 8 : TySize.getQuantity(); | ||||
5341 | if (!IsFPR) { | ||||
5342 | // 3 is the field number of __gr_offs | ||||
5343 | reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); | ||||
5344 | reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); | ||||
5345 | reg_top_index = 1; // field number for __gr_top | ||||
5346 | RegSize = llvm::alignTo(RegSize, 8); | ||||
5347 | } else { | ||||
5348 | // 4 is the field number of __vr_offs. | ||||
5349 | reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); | ||||
5350 | reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); | ||||
5351 | reg_top_index = 2; // field number for __vr_top | ||||
5352 | RegSize = 16 * NumRegs; | ||||
5353 | } | ||||
5354 | |||||
5355 | //======================================= | ||||
5356 | // Find out where argument was passed | ||||
5357 | //======================================= | ||||
5358 | |||||
5359 | // If reg_offs >= 0 we're already using the stack for this type of | ||||
5360 | // argument. We don't want to keep updating reg_offs (in case it overflows, | ||||
5361 | // though anyone passing 2GB of arguments, each at most 16 bytes, deserves | ||||
5362 | // whatever they get). | ||||
5363 | llvm::Value *UsingStack = nullptr; | ||||
5364 | UsingStack = CGF.Builder.CreateICmpSGE( | ||||
5365 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); | ||||
5366 | |||||
5367 | CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); | ||||
5368 | |||||
5369 | // Otherwise, at least some kind of argument could go in these registers, the | ||||
5370 | // question is whether this particular type is too big. | ||||
5371 | CGF.EmitBlock(MaybeRegBlock); | ||||
5372 | |||||
5373 | // Integer arguments may need their register alignment corrected (for example | ||||
5374 | // a "struct { __int128 a; };" is passed in the even/odd pair x_2N, x_{2N+1}). | ||||
5375 | // In this case we align __gr_offs to calculate the potential address. | ||||
5376 | if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { | ||||
5377 | int Align = TyAlign.getQuantity(); | ||||
5378 | |||||
5379 | reg_offs = CGF.Builder.CreateAdd( | ||||
5380 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), | ||||
5381 | "align_regoffs"); | ||||
5382 | reg_offs = CGF.Builder.CreateAnd( | ||||
5383 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), | ||||
5384 | "aligned_regoffs"); | ||||
5385 | } | ||||
5386 | |||||
5387 | // Update the gr_offs/vr_offs pointer for the next call to va_arg on this | ||||
5388 | // va_list. This is done unconditionally because allocating an argument to | ||||
5389 | // the stack also uses up all the remaining registers of the appropriate | ||||
5390 | // kind. | ||||
5391 | llvm::Value *NewOffset = nullptr; | ||||
5392 | NewOffset = CGF.Builder.CreateAdd( | ||||
5393 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); | ||||
5394 | CGF.Builder.CreateStore(NewOffset, reg_offs_p); | ||||
5395 | |||||
5396 | // Now we're in a position to decide whether this argument really was in | ||||
5397 | // registers or not. | ||||
5398 | llvm::Value *InRegs = nullptr; | ||||
5399 | InRegs = CGF.Builder.CreateICmpSLE( | ||||
5400 | NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); | ||||
5401 | |||||
5402 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); | ||||
5403 | |||||
5404 | //======================================= | ||||
5405 | // Argument was in registers | ||||
5406 | //======================================= | ||||
5407 | |||||
5408 | // Now we emit the code for if the argument was originally passed in | ||||
5409 | // registers. First start the appropriate block: | ||||
5410 | CGF.EmitBlock(InRegBlock); | ||||
5411 | |||||
5412 | llvm::Value *reg_top = nullptr; | ||||
5413 | Address reg_top_p = | ||||
5414 | CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); | ||||
5415 | reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); | ||||
5416 | Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), | ||||
5417 | CharUnits::fromQuantity(IsFPR ? 16 : 8)); | ||||
5418 | Address RegAddr = Address::invalid(); | ||||
5419 | llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); | ||||
5420 | |||||
5421 | if (IsIndirect) { | ||||
5422 | // If the argument was passed indirectly (it is really a struct), whatever | ||||
5423 | // we find in saved registers or on the stack is actually a struct **. | ||||
5424 | MemTy = llvm::PointerType::getUnqual(MemTy); | ||||
5425 | } | ||||
5426 | |||||
5427 | const Type *Base = nullptr; | ||||
5428 | uint64_t NumMembers = 0; | ||||
5429 | bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); | ||||
5430 | if (IsHFA && NumMembers > 1) { | ||||
5431 | // Homogeneous aggregates passed in registers have their elements split and | ||||
5432 | // stored 16 bytes apart regardless of size (they're notionally in qN, qN+1, | ||||
5433 | // ...). We reload the elements and store them contiguously into a temporary | ||||
5434 | // local variable. | ||||
5435 | assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); | ||||
5436 | auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); | ||||
5437 | llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); | ||||
5438 | llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); | ||||
5439 | Address Tmp = CGF.CreateTempAlloca(HFATy, | ||||
5440 | std::max(TyAlign, BaseTyInfo.second)); | ||||
5441 | |||||
5442 | // On big-endian platforms, the value will be right-aligned in its slot. | ||||
5443 | int Offset = 0; | ||||
5444 | if (CGF.CGM.getDataLayout().isBigEndian() && | ||||
5445 | BaseTyInfo.first.getQuantity() < 16) | ||||
5446 | Offset = 16 - BaseTyInfo.first.getQuantity(); | ||||
5447 | |||||
5448 | for (unsigned i = 0; i < NumMembers; ++i) { | ||||
5449 | CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); | ||||
5450 | Address LoadAddr = | ||||
5451 | CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); | ||||
5452 | LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); | ||||
5453 | |||||
5454 | Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i); | ||||
5455 | |||||
5456 | llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); | ||||
5457 | CGF.Builder.CreateStore(Elem, StoreAddr); | ||||
5458 | } | ||||
5459 | |||||
5460 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); | ||||
5461 | } else { | ||||
5462 | // Otherwise the object is contiguous in memory. | ||||
5463 | |||||
5464 | // It might be right-aligned in its slot. | ||||
5465 | CharUnits SlotSize = BaseAddr.getAlignment(); | ||||
5466 | if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && | ||||
5467 | (IsHFA || !isAggregateTypeForABI(Ty)) && | ||||
5468 | TySize < SlotSize) { | ||||
5469 | CharUnits Offset = SlotSize - TySize; | ||||
5470 | BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); | ||||
5471 | } | ||||
5472 | |||||
5473 | RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); | ||||
5474 | } | ||||
5475 | |||||
5476 | CGF.EmitBranch(ContBlock); | ||||
5477 | |||||
5478 | //======================================= | ||||
5479 | // Argument was on the stack | ||||
5480 | //======================================= | ||||
5481 | CGF.EmitBlock(OnStackBlock); | ||||
5482 | |||||
5483 | Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); | ||||
5484 | llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); | ||||
5485 | |||||
5486 | // Again, stack arguments may need realignment. In this case both integer and | ||||
5487 | // floating-point ones might be affected. | ||||
5488 | if (!IsIndirect && TyAlign.getQuantity() > 8) { | ||||
5489 | int Align = TyAlign.getQuantity(); | ||||
5490 | |||||
5491 | OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); | ||||
5492 | |||||
5493 | OnStackPtr = CGF.Builder.CreateAdd( | ||||
5494 | OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), | ||||
5495 | "align_stack"); | ||||
5496 | OnStackPtr = CGF.Builder.CreateAnd( | ||||
5497 | OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), | ||||
5498 | "align_stack"); | ||||
5499 | |||||
5500 | OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); | ||||
5501 | } | ||||
5502 | Address OnStackAddr(OnStackPtr, | ||||
5503 | std::max(CharUnits::fromQuantity(8), TyAlign)); | ||||
5504 | |||||
5505 | // All stack slots are multiples of 8 bytes. | ||||
5506 | CharUnits StackSlotSize = CharUnits::fromQuantity(8); | ||||
5507 | CharUnits StackSize; | ||||
5508 | if (IsIndirect) | ||||
5509 | StackSize = StackSlotSize; | ||||
5510 | else | ||||
5511 | StackSize = TySize.alignTo(StackSlotSize); | ||||
5512 | |||||
5513 | llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); | ||||
5514 | llvm::Value *NewStack = | ||||
5515 | CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); | ||||
5516 | |||||
5517 | // Write the new value of __stack for the next call to va_arg | ||||
5518 | CGF.Builder.CreateStore(NewStack, stack_p); | ||||
5519 | |||||
5520 | if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && | ||||
5521 | TySize < StackSlotSize) { | ||||
5522 | CharUnits Offset = StackSlotSize - TySize; | ||||
5523 | OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); | ||||
5524 | } | ||||
5525 | |||||
5526 | OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); | ||||
5527 | |||||
5528 | CGF.EmitBranch(ContBlock); | ||||
5529 | |||||
5530 | //======================================= | ||||
5531 | // Tidy up | ||||
5532 | //======================================= | ||||
5533 | CGF.EmitBlock(ContBlock); | ||||
5534 | |||||
5535 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, | ||||
5536 | OnStackAddr, OnStackBlock, "vaargs.addr"); | ||||
5537 | |||||
5538 | if (IsIndirect) | ||||
5539 | return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), | ||||
5540 | TyAlign); | ||||
5541 | |||||
5542 | return ResAddr; | ||||
5543 | } | ||||
5544 | |||||
5545 | Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, | ||||
5546 | CodeGenFunction &CGF) const { | ||||
5547 | // The backend's lowering doesn't support va_arg for aggregates or | ||||
5548 | // illegal vector types. Lower VAArg here for these cases and use | ||||
5549 | // the LLVM va_arg instruction for everything else. | ||||
5550 | if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) | ||||
5551 | return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); | ||||
5552 | |||||
5553 | CharUnits SlotSize = CharUnits::fromQuantity(8); | ||||
5554 | |||||
5555 | // Empty records are ignored for parameter passing purposes. | ||||
5556 | if (isEmptyRecord(getContext(), Ty, true)) { | ||||
5557 | Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); | ||||
5558 | Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); | ||||
5559 | return Addr; | ||||
5560 | } | ||||
5561 | |||||
5562 | // The size of the actual thing passed, which might end up just | ||||
5563 | // being a pointer for indirect types. | ||||
5564 | auto TyInfo = getContext().getTypeInfoInChars(Ty); | ||||
5565 | |||||
5566 | // Arguments bigger than 16 bytes which aren't homogeneous | ||||
5567 | // aggregates should be passed indirectly. | ||||
5568 | bool IsIndirect = false; | ||||
5569 | if (TyInfo.first.getQuantity() > 16) { | ||||
5570 | const Type *Base = nullptr; | ||||
5571 | uint64_t Members = 0; | ||||
5572 | IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); | ||||
5573 | } | ||||
5574 | |||||
5575 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, | ||||
5576 | TyInfo, SlotSize, /*AllowHigherAlign*/ true); | ||||
5577 | } | ||||
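| ||||| 
| // E.g. (our sketch): on Darwin a 32-byte non-HFA struct is read via a | ||||
| // pointer (IsIndirect), while struct { double d[3]; } remains an HFA at | ||||
| // 24 bytes and is copied by value from its 8-byte-aligned slots. | ||||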
5578 | |||||
5579 | Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
5580 | QualType Ty) const { | ||||
5581 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, | ||||
5582 | CGF.getContext().getTypeInfoInChars(Ty), | ||||
5583 | CharUnits::fromQuantity(8), | ||||
5584 | /*allowHigherAlign*/ false); | ||||
5585 | } | ||||
5586 | |||||
5587 | //===----------------------------------------------------------------------===// | ||||
5588 | // ARM ABI Implementation | ||||
5589 | //===----------------------------------------------------------------------===// | ||||
5590 | |||||
5591 | namespace { | ||||
5592 | |||||
5593 | class ARMABIInfo : public SwiftABIInfo { | ||||
5594 | public: | ||||
5595 | enum ABIKind { | ||||
5596 | APCS = 0, | ||||
5597 | AAPCS = 1, | ||||
5598 | AAPCS_VFP = 2, | ||||
5599 | AAPCS16_VFP = 3, | ||||
5600 | }; | ||||
5601 | |||||
5602 | private: | ||||
5603 | ABIKind Kind; | ||||
5604 | |||||
5605 | public: | ||||
5606 | ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) | ||||
5607 | : SwiftABIInfo(CGT), Kind(_Kind) { | ||||
5608 | setCCs(); | ||||
5609 | } | ||||
5610 | |||||
5611 | bool isEABI() const { | ||||
5612 | switch (getTarget().getTriple().getEnvironment()) { | ||||
5613 | case llvm::Triple::Android: | ||||
5614 | case llvm::Triple::EABI: | ||||
5615 | case llvm::Triple::EABIHF: | ||||
5616 | case llvm::Triple::GNUEABI: | ||||
5617 | case llvm::Triple::GNUEABIHF: | ||||
5618 | case llvm::Triple::MuslEABI: | ||||
5619 | case llvm::Triple::MuslEABIHF: | ||||
5620 | return true; | ||||
5621 | default: | ||||
5622 | return false; | ||||
5623 | } | ||||
5624 | } | ||||
5625 | |||||
5626 | bool isEABIHF() const { | ||||
5627 | switch (getTarget().getTriple().getEnvironment()) { | ||||
5628 | case llvm::Triple::EABIHF: | ||||
5629 | case llvm::Triple::GNUEABIHF: | ||||
5630 | case llvm::Triple::MuslEABIHF: | ||||
5631 | return true; | ||||
5632 | default: | ||||
5633 | return false; | ||||
5634 | } | ||||
5635 | } | ||||
5636 | |||||
5637 | ABIKind getABIKind() const { return Kind; } | ||||
5638 | |||||
5639 | private: | ||||
5640 | ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic, | ||||
5641 | unsigned functionCallConv) const; | ||||
5642 | ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic, | ||||
5643 | unsigned functionCallConv) const; | ||||
5644 | ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base, | ||||
5645 | uint64_t Members) const; | ||||
5646 | ABIArgInfo coerceIllegalVector(QualType Ty) const; | ||||
5647 | bool isIllegalVectorType(QualType Ty) const; | ||||
5648 | bool containsAnyFP16Vectors(QualType Ty) const; | ||||
5649 | |||||
5650 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; | ||||
5651 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, | ||||
5652 | uint64_t Members) const override; | ||||
5653 | |||||
5654 | bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const; | ||||
5655 | |||||
5656 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
5657 | |||||
5658 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
5659 | QualType Ty) const override; | ||||
5660 | |||||
5661 | llvm::CallingConv::ID getLLVMDefaultCC() const; | ||||
5662 | llvm::CallingConv::ID getABIDefaultCC() const; | ||||
5663 | void setCCs(); | ||||
5664 | |||||
5665 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
5666 | bool asReturnValue) const override { | ||||
5667 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
5668 | } | ||||
5669 | bool isSwiftErrorInRegister() const override { | ||||
5670 | return true; | ||||
5671 | } | ||||
5672 | bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, | ||||
5673 | unsigned elts) const override; | ||||
5674 | }; | ||||
5675 | |||||
5676 | class ARMTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
5677 | public: | ||||
5678 | ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) | ||||
5679 | :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} | ||||
5680 | |||||
5681 | const ARMABIInfo &getABIInfo() const { | ||||
5682 | return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); | ||||
5683 | } | ||||
5684 | |||||
5685 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
5686 | return 13; | ||||
5687 | } | ||||
5688 | |||||
5689 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { | ||||
5690 | return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue"; | ||||
5691 | } | ||||
5692 | |||||
5693 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
5694 | llvm::Value *Address) const override { | ||||
5695 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); | ||||
5696 | |||||
5697 | // 0-15 are the 16 integer registers. | ||||
5698 | AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); | ||||
5699 | return false; | ||||
5700 | } | ||||
5701 | |||||
5702 | unsigned getSizeOfUnwindException() const override { | ||||
5703 | if (getABIInfo().isEABI()) return 88; | ||||
5704 | return TargetCodeGenInfo::getSizeOfUnwindException(); | ||||
5705 | } | ||||
5706 | |||||
5707 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
5708 | CodeGen::CodeGenModule &CGM) const override { | ||||
5709 | if (GV->isDeclaration()) | ||||
5710 | return; | ||||
5711 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
5712 | if (!FD) | ||||
5713 | return; | ||||
5714 | |||||
5715 | const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); | ||||
5716 | if (!Attr) | ||||
5717 | return; | ||||
5718 | |||||
5719 | const char *Kind; | ||||
5720 | switch (Attr->getInterrupt()) { | ||||
5721 | case ARMInterruptAttr::Generic: Kind = ""; break; | ||||
5722 | case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; | ||||
5723 | case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; | ||||
5724 | case ARMInterruptAttr::SWI: Kind = "SWI"; break; | ||||
5725 | case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; | ||||
5726 | case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; | ||||
5727 | } | ||||
5728 | |||||
5729 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
5730 | |||||
5731 | Fn->addFnAttr("interrupt", Kind); | ||||
5732 | |||||
5733 | ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); | ||||
5734 | if (ABI == ARMABIInfo::APCS) | ||||
5735 | return; | ||||
5736 | |||||
5737 | // AAPCS guarantees that sp will be 8-byte aligned on any public interface, | ||||
5738 | // however this is not necessarily true on taking any interrupt. Instruct | ||||
5739 | // the backend to perform a realignment as part of the function prologue. | ||||
5740 | llvm::AttrBuilder B; | ||||
5741 | B.addStackAlignmentAttr(8); | ||||
5742 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); | ||||
5743 | } | ||||
5744 | }; | ||||
5745 | |||||
5746 | class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { | ||||
5747 | public: | ||||
5748 | WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) | ||||
5749 | : ARMTargetCodeGenInfo(CGT, K) {} | ||||
5750 | |||||
5751 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
5752 | CodeGen::CodeGenModule &CGM) const override; | ||||
5753 | |||||
5754 | void getDependentLibraryOption(llvm::StringRef Lib, | ||||
5755 | llvm::SmallString<24> &Opt) const override { | ||||
5756 | Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); | ||||
5757 | } | ||||
5758 | |||||
5759 | void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, | ||||
5760 | llvm::SmallString<32> &Opt) const override { | ||||
5761 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; | ||||
5762 | } | ||||
5763 | }; | ||||
5764 | |||||
5765 | void WindowsARMTargetCodeGenInfo::setTargetAttributes( | ||||
5766 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { | ||||
5767 | ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); | ||||
5768 | if (GV->isDeclaration()) | ||||
5769 | return; | ||||
5770 | addStackProbeTargetAttributes(D, GV, CGM); | ||||
5771 | } | ||||
5772 | } | ||||
5773 | |||||
5774 | void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
5775 | if (!::classifyReturnType(getCXXABI(), FI, *this)) | ||||
5776 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(), | ||||
5777 | FI.getCallingConvention()); | ||||
5778 | |||||
5779 | for (auto &I : FI.arguments()) | ||||
5780 | I.info = classifyArgumentType(I.type, FI.isVariadic(), | ||||
5781 | FI.getCallingConvention()); | ||||
5782 | |||||
5783 | |||||
5784 | // Always honor user-specified calling convention. | ||||
5785 | if (FI.getCallingConvention() != llvm::CallingConv::C) | ||||
5786 | return; | ||||
5787 | |||||
5788 | llvm::CallingConv::ID cc = getRuntimeCC(); | ||||
5789 | if (cc != llvm::CallingConv::C) | ||||
5790 | FI.setEffectiveCallingConvention(cc); | ||||
5791 | } | ||||
5792 | |||||
5793 | /// Return the default calling convention that LLVM will use. | ||||
5794 | llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { | ||||
5795 | // The default calling convention that LLVM will infer. | ||||
5796 | if (isEABIHF() || getTarget().getTriple().isWatchABI()) | ||||
5797 | return llvm::CallingConv::ARM_AAPCS_VFP; | ||||
5798 | else if (isEABI()) | ||||
5799 | return llvm::CallingConv::ARM_AAPCS; | ||||
5800 | else | ||||
5801 | return llvm::CallingConv::ARM_APCS; | ||||
5802 | } | ||||
5803 | |||||
5804 | /// Return the calling convention that our ABI would like us to use | ||||
5805 | /// as the C calling convention. | ||||
5806 | llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { | ||||
5807 | switch (getABIKind()) { | ||||
5808 | case APCS: return llvm::CallingConv::ARM_APCS; | ||||
5809 | case AAPCS: return llvm::CallingConv::ARM_AAPCS; | ||||
5810 | case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; | ||||
5811 | case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; | ||||
5812 | } | ||||
5813 | llvm_unreachable("bad ABI kind")::llvm::llvm_unreachable_internal("bad ABI kind", "/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/TargetInfo.cpp" , 5813); | ||||
5814 | } | ||||
5815 | |||||
5816 | void ARMABIInfo::setCCs() { | ||||
5817 | assert(getRuntimeCC() == llvm::CallingConv::C); | ||||
5818 | |||||
5819 | // Don't muddy up the IR with a ton of explicit annotations if | ||||
5820 | // they'd just match what LLVM will infer from the triple. | ||||
5821 | llvm::CallingConv::ID abiCC = getABIDefaultCC(); | ||||
5822 | if (abiCC != getLLVMDefaultCC()) | ||||
5823 | RuntimeCC = abiCC; | ||||
5824 | } | ||||
5825 | |||||
5826 | ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const { | ||||
5827 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
5828 | if (Size <= 32) { | ||||
5829 | llvm::Type *ResType = | ||||
5830 | llvm::Type::getInt32Ty(getVMContext()); | ||||
5831 | return ABIArgInfo::getDirect(ResType); | ||||
5832 | } | ||||
5833 | if (Size == 64 || Size == 128) { | ||||
5834 | llvm::Type *ResType = llvm::VectorType::get( | ||||
5835 | llvm::Type::getInt32Ty(getVMContext()), Size / 32); | ||||
5836 | return ABIArgInfo::getDirect(ResType); | ||||
5837 | } | ||||
5838 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
5839 | } | ||||
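// Editorial sketch (not part of the original source): concrete outcomes of
// coerceIllegalVector() above, using hypothetical GCC/Clang vector typedefs.
// The results follow directly from the Size checks:
//
//   typedef __attribute__((vector_size(2)))  char v2i8;   // 16 bits  -> i32
//   typedef __attribute__((vector_size(8)))  char v8i8;   // 64 bits  -> <2 x i32>
//   typedef __attribute__((vector_size(16))) char v16i8;  // 128 bits -> <4 x i32>
//   typedef __attribute__((vector_size(32))) char v32i8;  // 256 bits -> indirect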
5840 | |||||
5841 | ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty, | ||||
5842 | const Type *Base, | ||||
5843 | uint64_t Members) const { | ||||
5844 | assert(Base && "Base class should be set for homogeneous aggregate")((Base && "Base class should be set for homogeneous aggregate" ) ? static_cast<void> (0) : __assert_fail ("Base && \"Base class should be set for homogeneous aggregate\"" , "/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/TargetInfo.cpp" , 5844, __PRETTY_FUNCTION__)); | ||||
5845 | // Base can be a floating-point or a vector. | ||||
5846 | if (const VectorType *VT = Base->getAs<VectorType>()) { | ||||
5847 | // FP16 vectors should be converted to integer vectors | ||||
5848 | if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) { | ||||
5849 | uint64_t Size = getContext().getTypeSize(VT); | ||||
5850 | llvm::Type *NewVecTy = llvm::VectorType::get( | ||||
5851 | llvm::Type::getInt32Ty(getVMContext()), Size / 32); | ||||
5852 | llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members); | ||||
5853 | return ABIArgInfo::getDirect(Ty, 0, nullptr, false); | ||||
5854 | } | ||||
5855 | } | ||||
5856 | return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); | ||||
5857 | } | ||||
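// Editorial sketch (not part of the original source): examples of what
// classifyHomogeneousAggregate() accepts, assuming <arm_neon.h> for the
// vector type. Both structs repeat a single base type, so under AAPCS-VFP
// they are passed directly and end up in VFP registers:
//
//   struct HFA { float x, y, z; };      // base type float, 3 members
//   struct HVA { float32x2_t a, b; };   // base type 64-bit vector, 2 members
//
// The FP16 special case only rewrites the element type; the member count and
// overall size are preserved by building [Members x <N x i32>].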
5858 | |||||
5859 | ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, | ||||
5860 | unsigned functionCallConv) const { | ||||
5861 | // 6.1.2.1 The following argument types are VFP CPRCs: | ||||
5862 | // A single-precision floating-point type (including promoted | ||||
5863 | // half-precision types); A double-precision floating-point type; | ||||
5864 | // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate | ||||
5865 | // with a Base Type of a single- or double-precision floating-point type, | ||||
5866 | // 64-bit containerized vectors or 128-bit containerized vectors with one | ||||
5867 | // to four Elements. | ||||
5868 | // Variadic functions should always marshal to the base standard. | ||||
5869 | bool IsAAPCS_VFP = | ||||
5870 | !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false); | ||||
5871 | |||||
5872 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
5873 | |||||
5874 | // Handle illegal vector types here. | ||||
5875 | if (isIllegalVectorType(Ty)) | ||||
5876 | return coerceIllegalVector(Ty); | ||||
5877 | |||||
5878 | // _Float16 and __fp16 get passed as if they were an int or float, but with | ||||
5879 | // the top 16 bits unspecified. This is not done for OpenCL as it handles the | ||||
5880 | // half type natively, and does not need to interwork with AAPCS code. | ||||
5881 | if ((Ty->isFloat16Type() || Ty->isHalfType()) && | ||||
5882 | !getContext().getLangOpts().NativeHalfArgsAndReturns) { | ||||
5883 | llvm::Type *ResType = IsAAPCS_VFP ? | ||||
5884 | llvm::Type::getFloatTy(getVMContext()) : | ||||
5885 | llvm::Type::getInt32Ty(getVMContext()); | ||||
5886 | return ABIArgInfo::getDirect(ResType); | ||||
5887 | } | ||||
5888 | |||||
5889 | if (!isAggregateTypeForABI(Ty)) { | ||||
5890 | // Treat an enum type as its underlying type. | ||||
5891 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { | ||||
5892 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
5893 | } | ||||
5894 | |||||
5895 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
5896 | : ABIArgInfo::getDirect()); | ||||
5897 | } | ||||
5898 | |||||
5899 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { | ||||
5900 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
5901 | } | ||||
5902 | |||||
5903 | // Ignore empty records. | ||||
5904 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
5905 | return ABIArgInfo::getIgnore(); | ||||
5906 | |||||
5907 | if (IsAAPCS_VFP) { | ||||
5908 | // Homogeneous Aggregates need to be expanded when we can fit the aggregate | ||||
5909 | // into VFP registers. | ||||
5910 | const Type *Base = nullptr; | ||||
5911 | uint64_t Members = 0; | ||||
5912 | if (isHomogeneousAggregate(Ty, Base, Members)) | ||||
5913 | return classifyHomogeneousAggregate(Ty, Base, Members); | ||||
5914 | } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { | ||||
5915 | // WatchOS does have homogeneous aggregates. Note that we intentionally use | ||||
5916 | // this convention even for a variadic function: the backend will use GPRs | ||||
5917 | // if needed. | ||||
5918 | const Type *Base = nullptr; | ||||
5919 | uint64_t Members = 0; | ||||
5920 | if (isHomogeneousAggregate(Ty, Base, Members)) { | ||||
5921 | assert(Base && Members <= 4 && "unexpected homogeneous aggregate")((Base && Members <= 4 && "unexpected homogeneous aggregate" ) ? static_cast<void> (0) : __assert_fail ("Base && Members <= 4 && \"unexpected homogeneous aggregate\"" , "/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/TargetInfo.cpp" , 5921, __PRETTY_FUNCTION__)); | ||||
5922 | llvm::Type *Ty = | ||||
5923 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); | ||||
5924 | return ABIArgInfo::getDirect(Ty, 0, nullptr, false); | ||||
5925 | } | ||||
5926 | } | ||||
5927 | |||||
5928 | if (getABIKind() == ARMABIInfo::AAPCS16_VFP && | ||||
5929 | getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { | ||||
5930 | // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're | ||||
5931 | // bigger than 128-bits, they get placed in space allocated by the caller, | ||||
5932 | // and a pointer is passed. | ||||
5933 | return ABIArgInfo::getIndirect( | ||||
5934 | CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); | ||||
5935 | } | ||||
5936 | |||||
5937 | // Support byval for ARM. | ||||
5938 | // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at | ||||
5939 | // most 8-byte. We realign the indirect argument if type alignment is bigger | ||||
5940 | // than ABI alignment. | ||||
5941 | uint64_t ABIAlign = 4; | ||||
5942 | uint64_t TyAlign; | ||||
5943 | if (getABIKind() == ARMABIInfo::AAPCS_VFP || | ||||
5944 | getABIKind() == ARMABIInfo::AAPCS) { | ||||
5945 | TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); | ||||
5946 | ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); | ||||
5947 | } else { | ||||
5948 | TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); | ||||
5949 | } | ||||
5950 | if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { | ||||
5951 | assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); | ||||
5952 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), | ||||
5953 | /*ByVal=*/true, | ||||
5954 | /*Realign=*/TyAlign > ABIAlign); | ||||
5955 | } | ||||
5956 | |||||
5957 | // On RenderScript, coerce aggregates <= 64 bytes to an integer array of | ||||
5958 | // the same size and alignment. | ||||
5959 | if (getTarget().isRenderScriptTarget()) { | ||||
5960 | return coerceToIntArray(Ty, getContext(), getVMContext()); | ||||
5961 | } | ||||
5962 | |||||
5963 | // Otherwise, pass by coercing to a structure of the appropriate size. | ||||
5964 | llvm::Type* ElemTy; | ||||
5965 | unsigned SizeRegs; | ||||
5966 | // FIXME: Try to match the types of the arguments more accurately where | ||||
5967 | // we can. | ||||
5968 | if (TyAlign <= 4) { | ||||
5969 | ElemTy = llvm::Type::getInt32Ty(getVMContext()); | ||||
5970 | SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; | ||||
5971 | } else { | ||||
5972 | ElemTy = llvm::Type::getInt64Ty(getVMContext()); | ||||
5973 | SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; | ||||
5974 | } | ||||
5975 | |||||
5976 | return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); | ||||
5977 | } | ||||
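// Editorial sketch (not part of the original source): the fall-through
// coercion above applied to two hypothetical argument types under AAPCS:
//
//   struct S1 { int a; char b; };             // 8 bytes, align 4  -> [2 x i32]
//   struct S2 { long long a; long long b; };  // 16 bytes, align 8 -> [2 x i64]
//
// Only aggregates larger than 64 bytes take the byval-indirect path earlier
// in this function.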
5978 | |||||
5979 | static bool isIntegerLikeType(QualType Ty, ASTContext &Context, | ||||
5980 | llvm::LLVMContext &VMContext) { | ||||
5981 | // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure | ||||
5982 | // is called integer-like if its size is less than or equal to one word, and | ||||
5983 | // the offset of each of its addressable sub-fields is zero. | ||||
5984 | |||||
5985 | uint64_t Size = Context.getTypeSize(Ty); | ||||
5986 | |||||
5987 | // Check that the type fits in a word. | ||||
5988 | if (Size > 32) | ||||
5989 | return false; | ||||
5990 | |||||
5991 | // FIXME: Handle vector types! | ||||
5992 | if (Ty->isVectorType()) | ||||
5993 | return false; | ||||
5994 | |||||
5995 | // Float types are never treated as "integer like". | ||||
5996 | if (Ty->isRealFloatingType()) | ||||
5997 | return false; | ||||
5998 | |||||
5999 | // If this is a builtin or pointer type then it is ok. | ||||
6000 | if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) | ||||
6001 | return true; | ||||
6002 | |||||
6003 | // Small complex integer types are "integer like". | ||||
6004 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) | ||||
6005 | return isIntegerLikeType(CT->getElementType(), Context, VMContext); | ||||
6006 | |||||
6007 | // Single element and zero sized arrays should be allowed, by the definition | ||||
6008 | // above, but they are not. | ||||
6009 | |||||
6010 | // Otherwise, it must be a record type. | ||||
6011 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
6012 | if (!RT) return false; | ||||
6013 | |||||
6014 | // Ignore records with flexible arrays. | ||||
6015 | const RecordDecl *RD = RT->getDecl(); | ||||
6016 | if (RD->hasFlexibleArrayMember()) | ||||
6017 | return false; | ||||
6018 | |||||
6019 | // Check that all sub-fields are at offset 0, and are themselves "integer | ||||
6020 | // like". | ||||
6021 | const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); | ||||
6022 | |||||
6023 | bool HadField = false; | ||||
6024 | unsigned idx = 0; | ||||
6025 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); | ||||
6026 | i != e; ++i, ++idx) { | ||||
6027 | const FieldDecl *FD = *i; | ||||
6028 | |||||
6029 | // Bit-fields are not addressable; we only need to verify they are "integer | ||||
6030 | // like". We still have to disallow a subsequent non-bitfield, for example: | ||||
6031 | // struct { int : 0; int x; } | ||||
6032 | // is non-integer like according to gcc. | ||||
6033 | if (FD->isBitField()) { | ||||
6034 | if (!RD->isUnion()) | ||||
6035 | HadField = true; | ||||
6036 | |||||
6037 | if (!isIntegerLikeType(FD->getType(), Context, VMContext)) | ||||
6038 | return false; | ||||
6039 | |||||
6040 | continue; | ||||
6041 | } | ||||
6042 | |||||
6043 | // Check if this field is at offset 0. | ||||
6044 | if (Layout.getFieldOffset(idx) != 0) | ||||
6045 | return false; | ||||
6046 | |||||
6047 | if (!isIntegerLikeType(FD->getType(), Context, VMContext)) | ||||
6048 | return false; | ||||
6049 | |||||
6050 | // Only allow at most one field in a structure. This doesn't match the | ||||
6051 | // wording above, but follows gcc in situations with a field following an | ||||
6052 | // empty structure. | ||||
6053 | if (!RD->isUnion()) { | ||||
6054 | if (HadField) | ||||
6055 | return false; | ||||
6056 | |||||
6057 | HadField = true; | ||||
6058 | } | ||||
6059 | } | ||||
6060 | |||||
6061 | return true; | ||||
6062 | } | ||||
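// Editorial sketch (not part of the original source): how the APCS
// "integer-like" test above classifies a few hypothetical types:
//
//   struct A { short s; };         // one field at offset 0, fits a word: yes
//   struct B { char c; char d; };  // second field at a nonzero offset:  no
//   struct C { float f; };         // floating-point is never integer-like: no
//
// Integer-like results are returned in r0 by classifyReturnType() below;
// everything else is returned in memory under APCS.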
6063 | |||||
6064 | ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic, | ||||
6065 | unsigned functionCallConv) const { | ||||
6066 | |||||
6067 | // Variadic functions should always marshal to the base standard. | ||||
6068 | bool IsAAPCS_VFP = | ||||
6069 | !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true); | ||||
6070 | |||||
6071 | if (RetTy->isVoidType()) | ||||
6072 | return ABIArgInfo::getIgnore(); | ||||
6073 | |||||
6074 | if (const VectorType *VT = RetTy->getAs<VectorType>()) { | ||||
6075 | // Large vector types should be returned via memory. | ||||
6076 | if (getContext().getTypeSize(RetTy) > 128) | ||||
6077 | return getNaturalAlignIndirect(RetTy); | ||||
6078 | // FP16 vectors should be converted to integer vectors | ||||
6079 | if (!getTarget().hasLegalHalfType() && | ||||
6080 | (VT->getElementType()->isFloat16Type() || | ||||
6081 | VT->getElementType()->isHalfType())) | ||||
6082 | return coerceIllegalVector(RetTy); | ||||
6083 | } | ||||
6084 | |||||
6085 | // _Float16 and __fp16 get returned as if they were an int or float, but with | ||||
6086 | // the top 16 bits unspecified. This is not done for OpenCL as it handles the | ||||
6087 | // half type natively, and does not need to interwork with AAPCS code. | ||||
6088 | if ((RetTy->isFloat16Type() || RetTy->isHalfType()) && | ||||
6089 | !getContext().getLangOpts().NativeHalfArgsAndReturns) { | ||||
6090 | llvm::Type *ResType = IsAAPCS_VFP ? | ||||
6091 | llvm::Type::getFloatTy(getVMContext()) : | ||||
6092 | llvm::Type::getInt32Ty(getVMContext()); | ||||
6093 | return ABIArgInfo::getDirect(ResType); | ||||
6094 | } | ||||
6095 | |||||
6096 | if (!isAggregateTypeForABI(RetTy)) { | ||||
6097 | // Treat an enum type as its underlying type. | ||||
6098 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
6099 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
6100 | |||||
6101 | return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) | ||||
6102 | : ABIArgInfo::getDirect(); | ||||
6103 | } | ||||
6104 | |||||
6105 | // Are we following APCS? | ||||
6106 | if (getABIKind() == APCS) { | ||||
6107 | if (isEmptyRecord(getContext(), RetTy, false)) | ||||
6108 | return ABIArgInfo::getIgnore(); | ||||
6109 | |||||
6110 | // Complex types are all returned as packed integers. | ||||
6111 | // | ||||
6112 | // FIXME: Consider using 2 x vector types if the back end handles them | ||||
6113 | // correctly. | ||||
6114 | if (RetTy->isAnyComplexType()) | ||||
6115 | return ABIArgInfo::getDirect(llvm::IntegerType::get( | ||||
6116 | getVMContext(), getContext().getTypeSize(RetTy))); | ||||
6117 | |||||
6118 | // Integer like structures are returned in r0. | ||||
6119 | if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { | ||||
6120 | // Return in the smallest viable integer type. | ||||
6121 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
6122 | if (Size <= 8) | ||||
6123 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); | ||||
6124 | if (Size <= 16) | ||||
6125 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); | ||||
6126 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
6127 | } | ||||
6128 | |||||
6129 | // Otherwise return in memory. | ||||
6130 | return getNaturalAlignIndirect(RetTy); | ||||
6131 | } | ||||
6132 | |||||
6133 | // Otherwise this is an AAPCS variant. | ||||
6134 | |||||
6135 | if (isEmptyRecord(getContext(), RetTy, true)) | ||||
6136 | return ABIArgInfo::getIgnore(); | ||||
6137 | |||||
6138 | // Check for homogeneous aggregates with AAPCS-VFP. | ||||
6139 | if (IsAAPCS_VFP) { | ||||
6140 | const Type *Base = nullptr; | ||||
6141 | uint64_t Members = 0; | ||||
6142 | if (isHomogeneousAggregate(RetTy, Base, Members)) | ||||
6143 | return classifyHomogeneousAggregate(RetTy, Base, Members); | ||||
6144 | } | ||||
6145 | |||||
6146 | // Aggregates <= 4 bytes are returned in r0; other aggregates | ||||
6147 | // are returned indirectly. | ||||
6148 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
6149 | if (Size <= 32) { | ||||
6150 | // On RenderScript, coerce aggregates <= 4 bytes to an integer array of | ||||
6151 | // the same size and alignment. | ||||
6152 | if (getTarget().isRenderScriptTarget()) { | ||||
6153 | return coerceToIntArray(RetTy, getContext(), getVMContext()); | ||||
6154 | } | ||||
6155 | if (getDataLayout().isBigEndian()) | ||||
6156 | // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4). | ||||
6157 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
6158 | |||||
6159 | // Return in the smallest viable integer type. | ||||
6160 | if (Size <= 8) | ||||
6161 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); | ||||
6162 | if (Size <= 16) | ||||
6163 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); | ||||
6164 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
6165 | } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) { | ||||
6166 | llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext()); | ||||
6167 | llvm::Type *CoerceTy = | ||||
6168 | llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32); | ||||
6169 | return ABIArgInfo::getDirect(CoerceTy); | ||||
6170 | } | ||||
6171 | |||||
6172 | return getNaturalAlignIndirect(RetTy); | ||||
6173 | } | ||||
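// Editorial sketch (not part of the original source): the AAPCS return-size
// ladder above applied to hypothetical aggregates, assuming little-endian:
//
//   struct R1 { char c; };      // 8 bits  -> i8 in r0
//   struct R2 { short a, b; };  // 32 bits -> i32 in r0
//   struct R3 { int a, b; };    // 64 bits -> indirect (sret), except under
//                               // AAPCS16_VFP, where sizes up to 128 bits
//                               // are coerced to [N x i32] and returned
//                               // directly.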
6174 | |||||
6175 | /// isIllegalVectorType - check whether Ty is an illegal vector type. | ||||
6176 | bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { | ||||
6177 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
6178 | // On targets that don't support FP16, FP16 is expanded into float, and we | ||||
6179 | // don't want the ABI to depend on whether or not FP16 is supported in | ||||
6180 | // hardware. Thus return false to coerce FP16 vectors into integer vectors. | ||||
6181 | if (!getTarget().hasLegalHalfType() && | ||||
6182 | (VT->getElementType()->isFloat16Type() || | ||||
6183 | VT->getElementType()->isHalfType())) | ||||
6184 | return true; | ||||
6185 | if (isAndroid()) { | ||||
6186 | // Android shipped using Clang 3.1, which supported a slightly different | ||||
6187 | // vector ABI. The primary differences were that 3-element vector types | ||||
6188 | // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path | ||||
6189 | // accepts that legacy behavior for Android only. | ||||
6190 | // Check whether VT is legal. | ||||
6191 | unsigned NumElements = VT->getNumElements(); | ||||
6192 | // NumElements should be power of 2 or equal to 3. | ||||
6193 | if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3) | ||||
6194 | return true; | ||||
6195 | } else { | ||||
6196 | // Check whether VT is legal. | ||||
6197 | unsigned NumElements = VT->getNumElements(); | ||||
6198 | uint64_t Size = getContext().getTypeSize(VT); | ||||
6199 | // NumElements should be power of 2. | ||||
6200 | if (!llvm::isPowerOf2_32(NumElements)) | ||||
6201 | return true; | ||||
6202 | // Vectors of 32 bits or less are illegal; legal sizes are greater than 32 bits. | ||||
6203 | return Size <= 32; | ||||
6204 | } | ||||
6205 | } | ||||
6206 | return false; | ||||
6207 | } | ||||
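// Editorial sketch (not part of the original source): the Android carve-out
// above in practice, using hypothetical vector typedefs:
//
//   typedef __attribute__((vector_size(12))) int  v3i32;  // <3 x i32>
//   typedef __attribute__((vector_size(2)))  char v2i8;   // <2 x i8>, 16 bits
//
// Generic ARM: both are illegal (3 is not a power of two; 16 bits <= 32).
// Android: both are legal, matching the Clang 3.1-era vector ABI.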
6208 | |||||
6209 | /// Return true if a type contains any 16-bit floating-point vectors. | ||||
6210 | bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const { | ||||
6211 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { | ||||
6212 | uint64_t NElements = AT->getSize().getZExtValue(); | ||||
6213 | if (NElements == 0) | ||||
6214 | return false; | ||||
6215 | return containsAnyFP16Vectors(AT->getElementType()); | ||||
6216 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { | ||||
6217 | const RecordDecl *RD = RT->getDecl(); | ||||
6218 | |||||
6219 | // If this is a C++ record, check the bases first. | ||||
6220 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) | ||||
6221 | if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) { | ||||
6222 | return containsAnyFP16Vectors(B.getType()); | ||||
6223 | })) | ||||
6224 | return true; | ||||
6225 | |||||
6226 | if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) { | ||||
6227 | return FD && containsAnyFP16Vectors(FD->getType()); | ||||
6228 | })) | ||||
6229 | return true; | ||||
6230 | |||||
6231 | return false; | ||||
6232 | } else { | ||||
6233 | if (const VectorType *VT = Ty->getAs<VectorType>()) | ||||
6234 | return (VT->getElementType()->isFloat16Type() || | ||||
6235 | VT->getElementType()->isHalfType()); | ||||
6236 | return false; | ||||
6237 | } | ||||
6238 | } | ||||
6239 | |||||
6240 | bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, | ||||
6241 | llvm::Type *eltTy, | ||||
6242 | unsigned numElts) const { | ||||
6243 | if (!llvm::isPowerOf2_32(numElts)) | ||||
6244 | return false; | ||||
6245 | unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy); | ||||
6246 | if (size > 64) | ||||
6247 | return false; | ||||
6248 | if (vectorSize.getQuantity() != 8 && | ||||
6249 | (vectorSize.getQuantity() != 16 || numElts == 1)) | ||||
6250 | return false; | ||||
6251 | return true; | ||||
6252 | } | ||||
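// Editorial sketch (not part of the original source): the vectors accepted
// above are exactly the 8-byte ones and the multi-element 16-byte ones, e.g.
// <2 x float> (8 bytes) or <4 x i32> (16 bytes). A 16-byte <1 x i128> fails
// twice: its element is wider than 64 bits, and a 16-byte vector must have
// more than one element.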
6253 | |||||
6254 | bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { | ||||
6255 | // Homogeneous aggregates for AAPCS-VFP must have base types of float, | ||||
6256 | // double, or 64-bit or 128-bit vectors. | ||||
6257 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { | ||||
6258 | if (BT->getKind() == BuiltinType::Float || | ||||
6259 | BT->getKind() == BuiltinType::Double || | ||||
6260 | BT->getKind() == BuiltinType::LongDouble) | ||||
6261 | return true; | ||||
6262 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
6263 | unsigned VecSize = getContext().getTypeSize(VT); | ||||
6264 | if (VecSize == 64 || VecSize == 128) | ||||
6265 | return true; | ||||
6266 | } | ||||
6267 | return false; | ||||
6268 | } | ||||
6269 | |||||
6270 | bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, | ||||
6271 | uint64_t Members) const { | ||||
6272 | return Members <= 4; | ||||
6273 | } | ||||
6274 | |||||
6275 | bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention, | ||||
6276 | bool acceptHalf) const { | ||||
6277 | // Give precedence to user-specified calling conventions. | ||||
6278 | if (callConvention != llvm::CallingConv::C) | ||||
6279 | return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP); | ||||
6280 | else | ||||
6281 | return (getABIKind() == AAPCS_VFP) || | ||||
6282 | (acceptHalf && (getABIKind() == AAPCS16_VFP)); | ||||
6283 | } | ||||
6284 | |||||
6285 | Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
6286 | QualType Ty) const { | ||||
6287 | CharUnits SlotSize = CharUnits::fromQuantity(4); | ||||
6288 | |||||
6289 | // Empty records are ignored for parameter passing purposes. | ||||
6290 | if (isEmptyRecord(getContext(), Ty, true)) { | ||||
6291 | Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); | ||||
6292 | Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); | ||||
6293 | return Addr; | ||||
6294 | } | ||||
6295 | |||||
6296 | CharUnits TySize = getContext().getTypeSizeInChars(Ty); | ||||
6297 | CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty); | ||||
6298 | |||||
6299 | // Use indirect if the size of the illegal vector is bigger than 16 bytes. | ||||
6300 | bool IsIndirect = false; | ||||
6301 | const Type *Base = nullptr; | ||||
6302 | uint64_t Members = 0; | ||||
6303 | if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { | ||||
6304 | IsIndirect = true; | ||||
6305 | |||||
6306 | // ARMv7k passes structs bigger than 16 bytes indirectly, in space | ||||
6307 | // allocated by the caller. | ||||
6308 | } else if (TySize > CharUnits::fromQuantity(16) && | ||||
6309 | getABIKind() == ARMABIInfo::AAPCS16_VFP && | ||||
6310 | !isHomogeneousAggregate(Ty, Base, Members)) { | ||||
6311 | IsIndirect = true; | ||||
6312 | |||||
6313 | // Otherwise, bound the type's ABI alignment. | ||||
6314 | // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for | ||||
6315 | // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. | ||||
6316 | // Our callers should be prepared to handle an under-aligned address. | ||||
6317 | } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || | ||||
6318 | getABIKind() == ARMABIInfo::AAPCS) { | ||||
6319 | TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); | ||||
6320 | TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); | ||||
6321 | } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { | ||||
6322 | // ARMv7k allows type alignment up to 16 bytes. | ||||
6323 | TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); | ||||
6324 | TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); | ||||
6325 | } else { | ||||
6326 | TyAlignForABI = CharUnits::fromQuantity(4); | ||||
6327 | } | ||||
6328 | |||||
6329 | std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI }; | ||||
6330 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, | ||||
6331 | SlotSize, /*AllowHigherAlign*/ true); | ||||
6332 | } | ||||
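// Editorial sketch (not part of the original source): effect of the
// alignment clamping above on va_arg, for a hypothetical over-aligned type
// under AAPCS:
//
//   struct __attribute__((aligned(16))) Big { int a[4]; };
//
// TyAlignForABI is clamped to 8, so the emitted va_arg sequence rounds the
// argument cursor up to at most 8 bytes; as the comment above notes, callers
// must tolerate the resulting under-aligned address.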
6333 | |||||
6334 | //===----------------------------------------------------------------------===// | ||||
6335 | // NVPTX ABI Implementation | ||||
6336 | //===----------------------------------------------------------------------===// | ||||
6337 | |||||
6338 | namespace { | ||||
6339 | |||||
6340 | class NVPTXABIInfo : public ABIInfo { | ||||
6341 | public: | ||||
6342 | NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} | ||||
6343 | |||||
6344 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
6345 | ABIArgInfo classifyArgumentType(QualType Ty) const; | ||||
6346 | |||||
6347 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
6348 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
6349 | QualType Ty) const override; | ||||
6350 | }; | ||||
6351 | |||||
6352 | class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
6353 | public: | ||||
6354 | NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) | ||||
6355 | : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} | ||||
6356 | |||||
6357 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
6358 | CodeGen::CodeGenModule &M) const override; | ||||
6359 | bool shouldEmitStaticExternCAliases() const override; | ||||
6360 | |||||
6361 | private: | ||||
6362 | // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the | ||||
6363 | // resulting MDNode to the nvvm.annotations MDNode. | ||||
6364 | static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand); | ||||
6365 | }; | ||||
6366 | |||||
6367 | /// Checks whether the type is not directly supported by the current target. | ||||
6368 | static bool isUnsupportedType(ASTContext &Context, QualType T) { | ||||
6369 | if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type()) | ||||
6370 | return true; | ||||
6371 | if (!Context.getTargetInfo().hasFloat128Type() && | ||||
6372 | (T->isFloat128Type() || | ||||
6373 | (T->isRealFloatingType() && Context.getTypeSize(T) == 128))) | ||||
6374 | return true; | ||||
6375 | if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() && | ||||
6376 | Context.getTypeSize(T) > 64) | ||||
6377 | return true; | ||||
6378 | if (const auto *AT = T->getAsArrayTypeUnsafe()) | ||||
6379 | return isUnsupportedType(Context, AT->getElementType()); | ||||
6380 | const auto *RT = T->getAs<RecordType>(); | ||||
6381 | if (!RT) | ||||
6382 | return false; | ||||
6383 | const RecordDecl *RD = RT->getDecl(); | ||||
6384 | |||||
6385 | // If this is a C++ record, check the bases first. | ||||
6386 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) | ||||
6387 | for (const CXXBaseSpecifier &I : CXXRD->bases()) | ||||
6388 | if (isUnsupportedType(Context, I.getType())) | ||||
6389 | return true; | ||||
6390 | |||||
6391 | for (const FieldDecl *I : RD->fields()) | ||||
6392 | if (isUnsupportedType(Context, I->getType())) | ||||
6393 | return true; | ||||
6394 | return false; | ||||
6395 | } | ||||
6396 | |||||
6397 | /// Coerce the given type into an array with the maximum allowed element size. | ||||
6398 | static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context, | ||||
6399 | llvm::LLVMContext &LLVMContext, | ||||
6400 | unsigned MaxSize) { | ||||
6401 | // Alignment and Size are measured in bits. | ||||
6402 | const uint64_t Size = Context.getTypeSize(Ty); | ||||
6403 | const uint64_t Alignment = Context.getTypeAlign(Ty); | ||||
6404 | const unsigned Div = std::min<unsigned>(MaxSize, Alignment); | ||||
6405 | llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div); | ||||
6406 | const uint64_t NumElements = (Size + Div - 1) / Div; | ||||
6407 | return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); | ||||
6408 | } | ||||
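// Editorial sketch (not part of the original source): the arithmetic above
// for a hypothetical 128-bit, 128-bit-aligned type. With MaxSize = 64,
// Div = min(64, 128) = 64 and NumElements = (128 + 63) / 64 = 2, giving
// [2 x i64]; with MaxSize = 32 the same type becomes [4 x i32].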
6409 | |||||
6410 | ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { | ||||
6411 | if (RetTy->isVoidType()) | ||||
6412 | return ABIArgInfo::getIgnore(); | ||||
6413 | |||||
6414 | if (getContext().getLangOpts().OpenMP && | ||||
6415 | getContext().getLangOpts().OpenMPIsDevice && | ||||
6416 | isUnsupportedType(getContext(), RetTy)) | ||||
6417 | return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64); | ||||
6418 | |||||
6419 | // Note: this is different from the default ABI. | ||||
6420 | if (!RetTy->isScalarType()) | ||||
6421 | return ABIArgInfo::getDirect(); | ||||
6422 | |||||
6423 | // Treat an enum type as its underlying type. | ||||
6424 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
6425 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
6426 | |||||
6427 | return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) | ||||
6428 | : ABIArgInfo::getDirect()); | ||||
6429 | } | ||||
6430 | |||||
6431 | ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { | ||||
6432 | // Treat an enum type as its underlying type. | ||||
6433 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
6434 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
6435 | |||||
6436 | // Aggregate types are passed indirectly, by value. | ||||
6437 | if (isAggregateTypeForABI(Ty)) | ||||
6438 | return getNaturalAlignIndirect(Ty, /* byval */ true); | ||||
6439 | |||||
6440 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
6441 | : ABIArgInfo::getDirect()); | ||||
6442 | } | ||||
6443 | |||||
6444 | void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
6445 | if (!getCXXABI().classifyReturnType(FI)) | ||||
6446 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
6447 | for (auto &I : FI.arguments()) | ||||
6448 | I.info = classifyArgumentType(I.type); | ||||
6449 | |||||
6450 | // Always honor user-specified calling convention. | ||||
6451 | if (FI.getCallingConvention() != llvm::CallingConv::C) | ||||
6452 | return; | ||||
6453 | |||||
6454 | FI.setEffectiveCallingConvention(getRuntimeCC()); | ||||
6455 | } | ||||
6456 | |||||
6457 | Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
6458 | QualType Ty) const { | ||||
6459 | llvm_unreachable("NVPTX does not support varargs")::llvm::llvm_unreachable_internal("NVPTX does not support varargs" , "/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/TargetInfo.cpp" , 6459); | ||||
6460 | } | ||||
6461 | |||||
6462 | void NVPTXTargetCodeGenInfo::setTargetAttributes( | ||||
6463 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { | ||||
6464 | if (GV->isDeclaration()) | ||||
6465 | return; | ||||
6466 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
6467 | if (!FD) return; | ||||
6468 | |||||
6469 | llvm::Function *F = cast<llvm::Function>(GV); | ||||
6470 | |||||
6471 | // Perform special handling in OpenCL mode | ||||
6472 | if (M.getLangOpts().OpenCL) { | ||||
6473 | // Use OpenCL function attributes to check for kernel functions | ||||
6474 | // By default, all functions are device functions | ||||
6475 | if (FD->hasAttr<OpenCLKernelAttr>()) { | ||||
6476 | // OpenCL __kernel functions get kernel metadata | ||||
6477 | // Create !{<func-ref>, metadata !"kernel", i32 1} node | ||||
6478 | addNVVMMetadata(F, "kernel", 1); | ||||
6479 | // And kernel functions are not subject to inlining | ||||
6480 | F->addFnAttr(llvm::Attribute::NoInline); | ||||
6481 | } | ||||
6482 | } | ||||
6483 | |||||
6484 | // Perform special handling in CUDA mode. | ||||
6485 | if (M.getLangOpts().CUDA) { | ||||
6486 | // CUDA __global__ functions get a kernel metadata entry. Since | ||||
6487 | // __global__ functions cannot be called from the device, we do not | ||||
6488 | // need to set the noinline attribute. | ||||
6489 | if (FD->hasAttr<CUDAGlobalAttr>()) { | ||||
6490 | // Create !{<func-ref>, metadata !"kernel", i32 1} node | ||||
6491 | addNVVMMetadata(F, "kernel", 1); | ||||
6492 | } | ||||
6493 | if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { | ||||
6494 | // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node | ||||
6495 | llvm::APSInt MaxThreads(32); | ||||
6496 | MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); | ||||
6497 | if (MaxThreads > 0) | ||||
6498 | addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); | ||||
6499 | |||||
6500 | // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was | ||||
6501 | // not specified in __launch_bounds__ or if the user specified a 0 value, | ||||
6502 | // we don't have to add a PTX directive. | ||||
6503 | if (Attr->getMinBlocks()) { | ||||
6504 | llvm::APSInt MinBlocks(32); | ||||
6505 | MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); | ||||
6506 | if (MinBlocks > 0) | ||||
6507 | // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node | ||||
6508 | addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); | ||||
6509 | } | ||||
6510 | } | ||||
6511 | } | ||||
6512 | } | ||||
6513 | |||||
6514 | void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, | ||||
6515 | int Operand) { | ||||
6516 | llvm::Module *M = F->getParent(); | ||||
6517 | llvm::LLVMContext &Ctx = M->getContext(); | ||||
6518 | |||||
6519 | // Get "nvvm.annotations" metadata node | ||||
6520 | llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); | ||||
6521 | |||||
6522 | llvm::Metadata *MDVals[] = { | ||||
6523 | llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), | ||||
6524 | llvm::ConstantAsMetadata::get( | ||||
6525 | llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; | ||||
6526 | // Append metadata to nvvm.annotations | ||||
6527 | MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); | ||||
6528 | } | ||||
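// Editorial sketch (not part of the original source): for a kernel function
// @foo, addNVVMMetadata(F, "kernel", 1) produces module-level IR of the form:
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}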
6529 | |||||
6530 | bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { | ||||
6531 | return false; | ||||
6532 | } | ||||
6533 | } | ||||
6534 | |||||
6535 | //===----------------------------------------------------------------------===// | ||||
6536 | // SystemZ ABI Implementation | ||||
6537 | //===----------------------------------------------------------------------===// | ||||
6538 | |||||
6539 | namespace { | ||||
6540 | |||||
6541 | class SystemZABIInfo : public SwiftABIInfo { | ||||
6542 | bool HasVector; | ||||
6543 | |||||
6544 | public: | ||||
6545 | SystemZABIInfo(CodeGenTypes &CGT, bool HV) | ||||
6546 | : SwiftABIInfo(CGT), HasVector(HV) {} | ||||
6547 | |||||
6548 | bool isPromotableIntegerType(QualType Ty) const; | ||||
6549 | bool isCompoundType(QualType Ty) const; | ||||
6550 | bool isVectorArgumentType(QualType Ty) const; | ||||
6551 | bool isFPArgumentType(QualType Ty) const; | ||||
6552 | QualType GetSingleElementType(QualType Ty) const; | ||||
6553 | |||||
6554 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
6555 | ABIArgInfo classifyArgumentType(QualType ArgTy) const; | ||||
6556 | |||||
6557 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
6558 | if (!getCXXABI().classifyReturnType(FI)) | ||||
6559 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
6560 | for (auto &I : FI.arguments()) | ||||
6561 | I.info = classifyArgumentType(I.type); | ||||
6562 | } | ||||
6563 | |||||
6564 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
6565 | QualType Ty) const override; | ||||
6566 | |||||
6567 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, | ||||
6568 | bool asReturnValue) const override { | ||||
6569 | return occupiesMoreThan(CGT, scalars, /*total*/ 4); | ||||
6570 | } | ||||
6571 | bool isSwiftErrorInRegister() const override { | ||||
6572 | return false; | ||||
6573 | } | ||||
6574 | }; | ||||
6575 | |||||
6576 | class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
6577 | public: | ||||
6578 | SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) | ||||
6579 | : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} | ||||
6580 | }; | ||||
6581 | |||||
6582 | } | ||||
6583 | |||||
6584 | bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { | ||||
6585 | // Treat an enum type as its underlying type. | ||||
6586 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
6587 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
6588 | |||||
6589 | // Promotable integer types are required to be promoted by the ABI. | ||||
6590 | if (Ty->isPromotableIntegerType()) | ||||
6591 | return true; | ||||
6592 | |||||
6593 | // 32-bit values must also be promoted. | ||||
6594 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) | ||||
6595 | switch (BT->getKind()) { | ||||
6596 | case BuiltinType::Int: | ||||
6597 | case BuiltinType::UInt: | ||||
6598 | return true; | ||||
6599 | default: | ||||
6600 | return false; | ||||
6601 | } | ||||
6602 | return false; | ||||
6603 | } | ||||
6604 | |||||
6605 | bool SystemZABIInfo::isCompoundType(QualType Ty) const { | ||||
6606 | return (Ty->isAnyComplexType() || | ||||
6607 | Ty->isVectorType() || | ||||
6608 | isAggregateTypeForABI(Ty)); | ||||
6609 | } | ||||
6610 | |||||
6611 | bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { | ||||
6612 | return (HasVector && | ||||
6613 | Ty->isVectorType() && | ||||
6614 | getContext().getTypeSize(Ty) <= 128); | ||||
6615 | } | ||||
6616 | |||||
6617 | bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { | ||||
6618 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) | ||||
6619 | switch (BT->getKind()) { | ||||
6620 | case BuiltinType::Float: | ||||
6621 | case BuiltinType::Double: | ||||
6622 | return true; | ||||
6623 | default: | ||||
6624 | return false; | ||||
6625 | } | ||||
6626 | |||||
6627 | return false; | ||||
6628 | } | ||||
6629 | |||||
6630 | QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { | ||||
6631 | if (const RecordType *RT = Ty->getAsStructureType()) { | ||||
6632 | const RecordDecl *RD = RT->getDecl(); | ||||
6633 | QualType Found; | ||||
6634 | |||||
6635 | // If this is a C++ record, check the bases first. | ||||
6636 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) | ||||
6637 | for (const auto &I : CXXRD->bases()) { | ||||
6638 | QualType Base = I.getType(); | ||||
6639 | |||||
6640 | // Empty bases don't affect things either way. | ||||
6641 | if (isEmptyRecord(getContext(), Base, true)) | ||||
6642 | continue; | ||||
6643 | |||||
6644 | if (!Found.isNull()) | ||||
6645 | return Ty; | ||||
6646 | Found = GetSingleElementType(Base); | ||||
6647 | } | ||||
6648 | |||||
6649 | // Check the fields. | ||||
6650 | for (const auto *FD : RD->fields()) { | ||||
6651 | // For compatibility with GCC, ignore empty bitfields in C++ mode. | ||||
6652 | // Unlike isSingleElementStruct(), empty structure and array fields | ||||
6653 | // do count. So do anonymous bitfields that aren't zero-sized. | ||||
6654 | if (getContext().getLangOpts().CPlusPlus && | ||||
6655 | FD->isZeroLengthBitField(getContext())) | ||||
6656 | continue; | ||||
6657 | |||||
6658 | // Unlike isSingleElementStruct(), arrays do not count. | ||||
6659 | // Nested structures still do though. | ||||
6660 | if (!Found.isNull()) | ||||
6661 | return Ty; | ||||
6662 | Found = GetSingleElementType(FD->getType()); | ||||
6663 | } | ||||
6664 | |||||
6665 | // Unlike isSingleElementStruct(), trailing padding is allowed. | ||||
6666 | // An 8-byte aligned struct s { float f; } is passed as a double. | ||||
6667 | if (!Found.isNull()) | ||||
6668 | return Found; | ||||
6669 | } | ||||
6670 | |||||
6671 | return Ty; | ||||
6672 | } | ||||
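// Editorial sketch (not part of the original source): the unwrapping above
// for two hypothetical structs:
//
//   struct Outer { struct { float f; } inner; };  // unwraps to float
//   struct Pair  { float f; float g; };           // two fields -> stays Pair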
6673 | |||||
6674 | Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
6675 | QualType Ty) const { | ||||
6676 | // Assume that va_list type is correct; should be pointer to LLVM type: | ||||
6677 | // struct { | ||||
6678 | // i64 __gpr; | ||||
6679 | // i64 __fpr; | ||||
6680 | // i8 *__overflow_arg_area; | ||||
6681 | // i8 *__reg_save_area; | ||||
6682 | // }; | ||||
6683 | |||||
6684 | // Every non-vector argument occupies 8 bytes and is passed by preference | ||||
6685 | // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are | ||||
6686 | // always passed on the stack. | ||||
6687 | Ty = getContext().getCanonicalType(Ty); | ||||
6688 | auto TyInfo = getContext().getTypeInfoInChars(Ty); | ||||
6689 | llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); | ||||
6690 | llvm::Type *DirectTy = ArgTy; | ||||
6691 | ABIArgInfo AI = classifyArgumentType(Ty); | ||||
6692 | bool IsIndirect = AI.isIndirect(); | ||||
6693 | bool InFPRs = false; | ||||
6694 | bool IsVector = false; | ||||
6695 | CharUnits UnpaddedSize; | ||||
6696 | CharUnits DirectAlign; | ||||
6697 | if (IsIndirect) { | ||||
6698 | DirectTy = llvm::PointerType::getUnqual(DirectTy); | ||||
6699 | UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); | ||||
6700 | } else { | ||||
6701 | if (AI.getCoerceToType()) | ||||
6702 | ArgTy = AI.getCoerceToType(); | ||||
6703 | InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); | ||||
6704 | IsVector = ArgTy->isVectorTy(); | ||||
6705 | UnpaddedSize = TyInfo.first; | ||||
6706 | DirectAlign = TyInfo.second; | ||||
6707 | } | ||||
6708 | CharUnits PaddedSize = CharUnits::fromQuantity(8); | ||||
6709 | if (IsVector && UnpaddedSize > PaddedSize) | ||||
6710 | PaddedSize = CharUnits::fromQuantity(16); | ||||
6711 | assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); | ||||
6712 | |||||
6713 | CharUnits Padding = (PaddedSize - UnpaddedSize); | ||||
6714 | |||||
6715 | llvm::Type *IndexTy = CGF.Int64Ty; | ||||
6716 | llvm::Value *PaddedSizeV = | ||||
6717 | llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); | ||||
6718 | |||||
6719 | if (IsVector) { | ||||
6720 | // Work out the address of a vector argument on the stack. | ||||
6721 | // Vector arguments are always passed in the high bits of a | ||||
6722 | // single (8 byte) or double (16 byte) stack slot. | ||||
6723 | Address OverflowArgAreaPtr = | ||||
6724 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); | ||||
6725 | Address OverflowArgArea = | ||||
6726 | Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), | ||||
6727 | TyInfo.second); | ||||
6728 | Address MemAddr = | ||||
6729 | CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); | ||||
6730 | |||||
6731 | // Update overflow_arg_area_ptr pointer | ||||
6732 | llvm::Value *NewOverflowArgArea = | ||||
6733 | CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, | ||||
6734 | "overflow_arg_area"); | ||||
6735 | CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); | ||||
6736 | |||||
6737 | return MemAddr; | ||||
6738 | } | ||||
6739 | |||||
6740 | assert(PaddedSize.getQuantity() == 8); | ||||
6741 | |||||
6742 | unsigned MaxRegs, RegCountField, RegSaveIndex; | ||||
6743 | CharUnits RegPadding; | ||||
6744 | if (InFPRs) { | ||||
6745 | MaxRegs = 4; // Maximum of 4 FPR arguments | ||||
6746 | RegCountField = 1; // __fpr | ||||
6747 | RegSaveIndex = 16; // save offset for f0 | ||||
6748 | RegPadding = CharUnits(); // floats are passed in the high bits of an FPR | ||||
6749 | } else { | ||||
6750 | MaxRegs = 5; // Maximum of 5 GPR arguments | ||||
6751 | RegCountField = 0; // __gpr | ||||
6752 | RegSaveIndex = 2; // save offset for r2 | ||||
6753 | RegPadding = Padding; // values are passed in the low bits of a GPR | ||||
6754 | } | ||||
6755 | |||||
6756 | Address RegCountPtr = | ||||
6757 | CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); | ||||
6758 | llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); | ||||
6759 | llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); | ||||
6760 | llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, | ||||
6761 | "fits_in_regs"); | ||||
6762 | |||||
6763 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); | ||||
6764 | llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); | ||||
6765 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); | ||||
6766 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); | ||||
6767 | |||||
6768 | // Emit code to load the value if it was passed in registers. | ||||
6769 | CGF.EmitBlock(InRegBlock); | ||||
6770 | |||||
6771 | // Work out the address of an argument register. | ||||
6772 | llvm::Value *ScaledRegCount = | ||||
6773 | CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); | ||||
6774 | llvm::Value *RegBase = | ||||
6775 | llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() | ||||
6776 | + RegPadding.getQuantity()); | ||||
6777 | llvm::Value *RegOffset = | ||||
6778 | CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); | ||||
6779 | Address RegSaveAreaPtr = | ||||
6780 | CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); | ||||
6781 | llvm::Value *RegSaveArea = | ||||
6782 | CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); | ||||
6783 | Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, | ||||
6784 | "raw_reg_addr"), | ||||
6785 | PaddedSize); | ||||
6786 | Address RegAddr = | ||||
6787 | CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); | ||||
6788 | |||||
6789 | // Update the register count | ||||
6790 | llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); | ||||
6791 | llvm::Value *NewRegCount = | ||||
6792 | CGF.Builder.CreateAdd(RegCount, One, "reg_count"); | ||||
6793 | CGF.Builder.CreateStore(NewRegCount, RegCountPtr); | ||||
6794 | CGF.EmitBranch(ContBlock); | ||||
6795 | |||||
6796 | // Emit code to load the value if it was passed in memory. | ||||
6797 | CGF.EmitBlock(InMemBlock); | ||||
6798 | |||||
6799 | // Work out the address of a stack argument. | ||||
6800 | Address OverflowArgAreaPtr = | ||||
6801 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); | ||||
6802 | Address OverflowArgArea = | ||||
6803 | Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), | ||||
6804 | PaddedSize); | ||||
6805 | Address RawMemAddr = | ||||
6806 | CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); | ||||
6807 | Address MemAddr = | ||||
6808 | CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); | ||||
6809 | |||||
6810 | // Update overflow_arg_area_ptr pointer | ||||
6811 | llvm::Value *NewOverflowArgArea = | ||||
6812 | CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, | ||||
6813 | "overflow_arg_area"); | ||||
6814 | CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); | ||||
6815 | CGF.EmitBranch(ContBlock); | ||||
6816 | |||||
6817 | // Return the appropriate result. | ||||
6818 | CGF.EmitBlock(ContBlock); | ||||
6819 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, | ||||
6820 | MemAddr, InMemBlock, "va_arg.addr"); | ||||
6821 | |||||
6822 | if (IsIndirect) | ||||
6823 | ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), | ||||
6824 | TyInfo.second); | ||||
6825 | |||||
6826 | return ResAddr; | ||||
6827 | } | ||||
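// Editorial sketch (not part of the original source): shape of the IR
// emitted above for a non-vector argument; the names match the value and
// block names used in the code:
//
//   %fits_in_regs = icmp ult i64 %reg_count, <MaxRegs>
//   br i1 %fits_in_regs, label %vaarg.in_reg, label %vaarg.in_mem
//   ; ... load from the register save area or the overflow area ...
//   %va_arg.addr = phi [ %reg_addr, %vaarg.in_reg ], [ %mem_addr, %vaarg.in_mem ]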
6828 | |||||
6829 | ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { | ||||
6830 | if (RetTy->isVoidType()) | ||||
6831 | return ABIArgInfo::getIgnore(); | ||||
6832 | if (isVectorArgumentType(RetTy)) | ||||
6833 | return ABIArgInfo::getDirect(); | ||||
6834 | if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) | ||||
6835 | return getNaturalAlignIndirect(RetTy); | ||||
6836 | return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy) | ||||
6837 | : ABIArgInfo::getDirect()); | ||||
6838 | } | ||||
6839 | |||||
6840 | ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { | ||||
6841 | // Handle the generic C++ ABI. | ||||
6842 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
6843 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
6844 | |||||
6845 | // Integers and enums are extended to full register width. | ||||
6846 | if (isPromotableIntegerType(Ty)) | ||||
6847 | return ABIArgInfo::getExtend(Ty); | ||||
6848 | |||||
6849 | // Handle vector types and vector-like structure types. Note that | ||||
6850 | // as opposed to float-like structure types, we do not allow any | ||||
6851 | // padding for vector-like structures, so verify the sizes match. | ||||
6852 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
6853 | QualType SingleElementTy = GetSingleElementType(Ty); | ||||
6854 | if (isVectorArgumentType(SingleElementTy) && | ||||
6855 | getContext().getTypeSize(SingleElementTy) == Size) | ||||
6856 | return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); | ||||
6857 | |||||
6858 | // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. | ||||
6859 | if (Size != 8 && Size != 16 && Size != 32 && Size != 64) | ||||
6860 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
6861 | |||||
6862 | // Handle small structures. | ||||
6863 | if (const RecordType *RT = Ty->getAs<RecordType>()) { | ||||
6864 | // Structures with flexible arrays have variable length, so they really | ||||
6865 | // fail the size test above. | ||||
6866 | const RecordDecl *RD = RT->getDecl(); | ||||
6867 | if (RD->hasFlexibleArrayMember()) | ||||
6868 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
6869 | |||||
6870 | // The structure is passed as an unextended integer, a float, or a double. | ||||
6871 | llvm::Type *PassTy; | ||||
6872 | if (isFPArgumentType(SingleElementTy)) { | ||||
6873 | assert(Size == 32 || Size == 64); | ||||
6874 | if (Size == 32) | ||||
6875 | PassTy = llvm::Type::getFloatTy(getVMContext()); | ||||
6876 | else | ||||
6877 | PassTy = llvm::Type::getDoubleTy(getVMContext()); | ||||
6878 | } else | ||||
6879 | PassTy = llvm::IntegerType::get(getVMContext(), Size); | ||||
6880 | return ABIArgInfo::getDirect(PassTy); | ||||
6881 | } | ||||
6882 | |||||
6883 | // Non-structure compounds are passed indirectly. | ||||
6884 | if (isCompoundType(Ty)) | ||||
6885 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
6886 | |||||
6887 | return ABIArgInfo::getDirect(nullptr); | ||||
6888 | } | ||||
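// A worked example (added for illustration; hypothetical declarations) of the
// argument rules above:
//
//   struct F1 { float f; };      // 32 bits, single FP element
//                                //   -> getDirect(float)
//   struct I2 { int a, b; };     // 64 bits, no single FP element
//                                //   -> getDirect(i64)
//   struct C3 { char c[3]; };    // 24 bits: not 8/16/32/64
//                                //   -> getNaturalAlignIndirect(ByVal=false)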
6889 | |||||
6890 | //===----------------------------------------------------------------------===// | ||||
6891 | // MSP430 ABI Implementation | ||||
6892 | //===----------------------------------------------------------------------===// | ||||
6893 | |||||
6894 | namespace { | ||||
6895 | |||||
6896 | class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
6897 | public: | ||||
6898 | MSP430TargetCodeGenInfo(CodeGenTypes &CGT) | ||||
6899 | : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} | ||||
6900 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
6901 | CodeGen::CodeGenModule &M) const override; | ||||
6902 | }; | ||||
6903 | |||||
6904 | } | ||||
6905 | |||||
6906 | void MSP430TargetCodeGenInfo::setTargetAttributes( | ||||
6907 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { | ||||
6908 | if (GV->isDeclaration()) | ||||
6909 | return; | ||||
6910 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { | ||||
6911 | const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>(); | ||||
6912 | if (!InterruptAttr) | ||||
6913 | return; | ||||
6914 | |||||
6915 | // Handle 'interrupt' attribute: | ||||
6916 | llvm::Function *F = cast<llvm::Function>(GV); | ||||
6917 | |||||
6918 | // Step 1: Set ISR calling convention. | ||||
6919 | F->setCallingConv(llvm::CallingConv::MSP430_INTR); | ||||
6920 | |||||
6921 | // Step 2: Add attributes goodness. | ||||
6922 | F->addFnAttr(llvm::Attribute::NoInline); | ||||
6923 | F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber())); | ||||
6924 | } | ||||
6925 | } | ||||
6926 | |||||
6927 | //===----------------------------------------------------------------------===// | ||||
6928 | // MIPS ABI Implementation. This works for both little-endian and | ||||
6929 | // big-endian variants. | ||||
6930 | //===----------------------------------------------------------------------===// | ||||
6931 | |||||
6932 | namespace { | ||||
6933 | class MipsABIInfo : public ABIInfo { | ||||
6934 | bool IsO32; | ||||
6935 | unsigned MinABIStackAlignInBytes, StackAlignInBytes; | ||||
6936 | void CoerceToIntArgs(uint64_t TySize, | ||||
6937 | SmallVectorImpl<llvm::Type *> &ArgList) const; | ||||
6938 | llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; | ||||
6939 | llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; | ||||
6940 | llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const; | ||||
6941 | public: | ||||
6942 | MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : | ||||
6943 | ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), | ||||
6944 | StackAlignInBytes(IsO32 ? 8 : 16) {} | ||||
6945 | |||||
6946 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
6947 | ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const; | ||||
6948 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
6949 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
6950 | QualType Ty) const override; | ||||
6951 | ABIArgInfo extendType(QualType Ty) const; | ||||
6952 | }; | ||||
6953 | |||||
6954 | class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
6955 | unsigned SizeOfUnwindException; | ||||
6956 | public: | ||||
6957 | MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) | ||||
6958 | : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), | ||||
6959 | SizeOfUnwindException(IsO32 ? 24 : 32) {} | ||||
6960 | |||||
6961 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { | ||||
6962 | return 29; | ||||
6963 | } | ||||
6964 | |||||
6965 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
6966 | CodeGen::CodeGenModule &CGM) const override { | ||||
6967 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
6968 | if (!FD) return; | ||||
6969 | llvm::Function *Fn = cast<llvm::Function>(GV); | ||||
6970 | |||||
6971 | if (FD->hasAttr<MipsLongCallAttr>()) | ||||
6972 | Fn->addFnAttr("long-call"); | ||||
6973 | else if (FD->hasAttr<MipsShortCallAttr>()) | ||||
6974 | Fn->addFnAttr("short-call"); | ||||
6975 | |||||
6976 | // Other attributes do not have a meaning for declarations. | ||||
6977 | if (GV->isDeclaration()) | ||||
6978 | return; | ||||
6979 | |||||
6980 | if (FD->hasAttr<Mips16Attr>()) { | ||||
6981 | Fn->addFnAttr("mips16"); | ||||
6982 | } | ||||
6983 | else if (FD->hasAttr<NoMips16Attr>()) { | ||||
6984 | Fn->addFnAttr("nomips16"); | ||||
6985 | } | ||||
6986 | |||||
6987 | if (FD->hasAttr<MicroMipsAttr>()) | ||||
6988 | Fn->addFnAttr("micromips"); | ||||
6989 | else if (FD->hasAttr<NoMicroMipsAttr>()) | ||||
6990 | Fn->addFnAttr("nomicromips"); | ||||
6991 | |||||
6992 | const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); | ||||
6993 | if (!Attr) | ||||
6994 | return; | ||||
6995 | |||||
6996 | const char *Kind; | ||||
6997 | switch (Attr->getInterrupt()) { | ||||
6998 | case MipsInterruptAttr::eic: Kind = "eic"; break; | ||||
6999 | case MipsInterruptAttr::sw0: Kind = "sw0"; break; | ||||
7000 | case MipsInterruptAttr::sw1: Kind = "sw1"; break; | ||||
7001 | case MipsInterruptAttr::hw0: Kind = "hw0"; break; | ||||
7002 | case MipsInterruptAttr::hw1: Kind = "hw1"; break; | ||||
7003 | case MipsInterruptAttr::hw2: Kind = "hw2"; break; | ||||
7004 | case MipsInterruptAttr::hw3: Kind = "hw3"; break; | ||||
7005 | case MipsInterruptAttr::hw4: Kind = "hw4"; break; | ||||
7006 | case MipsInterruptAttr::hw5: Kind = "hw5"; break; | ||||
7007 | } | ||||
7008 | |||||
7009 | Fn->addFnAttr("interrupt", Kind); | ||||
7010 | |||||
7011 | } | ||||
7012 | |||||
7013 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
7014 | llvm::Value *Address) const override; | ||||
7015 | |||||
7016 | unsigned getSizeOfUnwindException() const override { | ||||
7017 | return SizeOfUnwindException; | ||||
7018 | } | ||||
7019 | }; | ||||
7020 | } | ||||
7021 | |||||
7022 | void MipsABIInfo::CoerceToIntArgs( | ||||
7023 | uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { | ||||
7024 | llvm::IntegerType *IntTy = | ||||
7025 | llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); | ||||
7026 | |||||
7027 | // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy; TySize is in bits. | ||||
7028 | for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) | ||||
7029 | ArgList.push_back(IntTy); | ||||
7030 | |||||
7031 | // If necessary, add one more integer type to ArgList. | ||||
7032 | unsigned R = TySize % (MinABIStackAlignInBytes * 8); | ||||
7033 | |||||
7034 | if (R) | ||||
7035 | ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); | ||||
7036 | } | ||||
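// Worked example (added for illustration): on O32, MinABIStackAlignInBytes is
// 4, so IntTy is i32. A hypothetical 9-byte type (TySize == 72 bits) yields
// two full i32 slots plus an i8 remainder:
//
//   CoerceToIntArgs(72, ArgList);   // ArgList == { i32, i32, i8 }
//
// On N32/N64 the slot type is i64, so the same 72 bits become { i64, i8 }.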
7037 | |||||
7038 | // In N32/64, an aligned double-precision floating-point field is passed in | ||||
7039 | // a register. | ||||
7040 | llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { | ||||
7041 | SmallVector<llvm::Type*, 8> ArgList, IntArgList; | ||||
7042 | |||||
7043 | if (IsO32) { | ||||
7044 | CoerceToIntArgs(TySize, ArgList); | ||||
7045 | return llvm::StructType::get(getVMContext(), ArgList); | ||||
7046 | } | ||||
7047 | |||||
7048 | if (Ty->isComplexType()) | ||||
7049 | return CGT.ConvertType(Ty); | ||||
7050 | |||||
7051 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
7052 | |||||
7053 | // Unions/vectors are passed in integer registers. | ||||
7054 | if (!RT || !RT->isStructureOrClassType()) { | ||||
7055 | CoerceToIntArgs(TySize, ArgList); | ||||
7056 | return llvm::StructType::get(getVMContext(), ArgList); | ||||
7057 | } | ||||
7058 | |||||
7059 | const RecordDecl *RD = RT->getDecl(); | ||||
7060 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); | ||||
7061 | assert(!(TySize % 8) && "Size of structure must be multiple of 8."); | ||||
7062 | |||||
7063 | uint64_t LastOffset = 0; | ||||
7064 | unsigned idx = 0; | ||||
7065 | llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); | ||||
7066 | |||||
7067 | // Iterate over fields in the struct/class and check if there are any aligned | ||||
7068 | // double fields. | ||||
7069 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); | ||||
7070 | i != e; ++i, ++idx) { | ||||
7071 | const QualType Ty = i->getType(); | ||||
7072 | const BuiltinType *BT = Ty->getAs<BuiltinType>(); | ||||
7073 | |||||
7074 | if (!BT || BT->getKind() != BuiltinType::Double) | ||||
7075 | continue; | ||||
7076 | |||||
7077 | uint64_t Offset = Layout.getFieldOffset(idx); | ||||
7078 | if (Offset % 64) // Ignore doubles that are not aligned. | ||||
7079 | continue; | ||||
7080 | |||||
7081 | // Add ((Offset - LastOffset) / 64) args of type i64. | ||||
7082 | for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) | ||||
7083 | ArgList.push_back(I64); | ||||
7084 | |||||
7085 | // Add double type. | ||||
7086 | ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); | ||||
7087 | LastOffset = Offset + 64; | ||||
7088 | } | ||||
7089 | |||||
7090 | CoerceToIntArgs(TySize - LastOffset, IntArgList); | ||||
7091 | ArgList.append(IntArgList.begin(), IntArgList.end()); | ||||
7092 | |||||
7093 | return llvm::StructType::get(getVMContext(), ArgList); | ||||
7094 | } | ||||
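// Example (added; hypothetical type): under N32/N64,
//
//   struct S { double d; long l; };   // 128 bits, d aligned at offset 0
//
// is coerced to { double, i64 }: the aligned double keeps its FPR-eligible
// slot, and the trailing 64 bits are coerced to i64 by CoerceToIntArgs.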
7095 | |||||
7096 | llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, | ||||
7097 | uint64_t Offset) const { | ||||
7098 | if (OrigOffset + MinABIStackAlignInBytes > Offset) | ||||
7099 | return nullptr; | ||||
7100 | |||||
7101 | return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); | ||||
7102 | } | ||||
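// Illustration (added): padding is only materialized when aligning skips part
// of a slot. With O32's MinABIStackAlignInBytes == 4:
//
//   getPaddingType(/*OrigOffset=*/4, /*Offset=*/4);  // 4 + 4 > 4 -> nullptr
//   getPaddingType(/*OrigOffset=*/4, /*Offset=*/8);  // condition false
//                                                    // -> i32 (4 bytes * 8)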
7103 | |||||
7104 | ABIArgInfo | ||||
7105 | MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { | ||||
7106 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
7107 | |||||
7108 | uint64_t OrigOffset = Offset; | ||||
7109 | uint64_t TySize = getContext().getTypeSize(Ty); | ||||
7110 | uint64_t Align = getContext().getTypeAlign(Ty) / 8; | ||||
7111 | |||||
7112 | Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), | ||||
7113 | (uint64_t)StackAlignInBytes); | ||||
7114 | unsigned CurrOffset = llvm::alignTo(Offset, Align); | ||||
7115 | Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; | ||||
7116 | |||||
7117 | if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { | ||||
7118 | // Ignore empty aggregates. | ||||
7119 | if (TySize == 0) | ||||
7120 | return ABIArgInfo::getIgnore(); | ||||
7121 | |||||
7122 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { | ||||
7123 | Offset = OrigOffset + MinABIStackAlignInBytes; | ||||
7124 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
7125 | } | ||||
7126 | |||||
7127 | // If we have reached here, aggregates are passed directly by coercing to | ||||
7128 | // another structure type. Padding is inserted if the offset of the | ||||
7129 | // aggregate is unaligned. | ||||
7130 | ABIArgInfo ArgInfo = | ||||
7131 | ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, | ||||
7132 | getPaddingType(OrigOffset, CurrOffset)); | ||||
7133 | ArgInfo.setInReg(true); | ||||
7134 | return ArgInfo; | ||||
7135 | } | ||||
7136 | |||||
7137 | // Treat an enum type as its underlying type. | ||||
7138 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
7139 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
7140 | |||||
7141 | // All integral types are promoted to the GPR width. | ||||
7142 | if (Ty->isIntegralOrEnumerationType()) | ||||
7143 | return extendType(Ty); | ||||
7144 | |||||
7145 | return ABIArgInfo::getDirect( | ||||
7146 | nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); | ||||
7147 | } | ||||
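// Offset bookkeeping example (illustrative only): on N64, with Offset == 8 and
// a hypothetical 16-byte aggregate aligned to 16 bytes, Align is clamped to
// StackAlignInBytes (16), CurrOffset becomes alignTo(8, 16) == 16, and the
// next Offset becomes 16 + 16 == 32. The 8-byte hole at offset 8 is modelled
// by getPaddingType(8, 16) == i64 rather than by moving the argument.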
7148 | |||||
7149 | llvm::Type* | ||||
7150 | MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { | ||||
7151 | const RecordType *RT = RetTy->getAs<RecordType>(); | ||||
7152 | SmallVector<llvm::Type*, 8> RTList; | ||||
7153 | |||||
7154 | if (RT && RT->isStructureOrClassType()) { | ||||
7155 | const RecordDecl *RD = RT->getDecl(); | ||||
7156 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); | ||||
7157 | unsigned FieldCnt = Layout.getFieldCount(); | ||||
7158 | |||||
7159 | // N32/64 returns struct/classes in floating point registers if the | ||||
7160 | // following conditions are met: | ||||
7161 | // 1. The size of the struct/class is no larger than 128 bits. | ||||
7162 | // 2. The struct/class has one or two fields all of which are floating | ||||
7163 | // point types. | ||||
7164 | // 3. The offset of the first field is zero (this follows what gcc does). | ||||
7165 | // | ||||
7166 | // Any other composite results are returned in integer registers. | ||||
7167 | // | ||||
7168 | if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { | ||||
7169 | RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); | ||||
7170 | for (; b != e; ++b) { | ||||
7171 | const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); | ||||
7172 | |||||
7173 | if (!BT || !BT->isFloatingPoint()) | ||||
7174 | break; | ||||
7175 | |||||
7176 | RTList.push_back(CGT.ConvertType(b->getType())); | ||||
7177 | } | ||||
7178 | |||||
7179 | if (b == e) | ||||
7180 | return llvm::StructType::get(getVMContext(), RTList, | ||||
7181 | RD->hasAttr<PackedAttr>()); | ||||
7182 | |||||
7183 | RTList.clear(); | ||||
7184 | } | ||||
7185 | } | ||||
7186 | |||||
7187 | CoerceToIntArgs(Size, RTList); | ||||
7188 | return llvm::StructType::get(getVMContext(), RTList); | ||||
7189 | } | ||||
7190 | |||||
7191 | ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { | ||||
7192 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
7193 | |||||
7194 | if (RetTy->isVoidType()) | ||||
7195 | return ABIArgInfo::getIgnore(); | ||||
7196 | |||||
7197 | // O32 doesn't treat zero-sized structs differently from other structs. | ||||
7198 | // However, N32/N64 ignores zero-sized return values. | ||||
7199 | if (!IsO32 && Size == 0) | ||||
7200 | return ABIArgInfo::getIgnore(); | ||||
7201 | |||||
7202 | if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { | ||||
7203 | if (Size <= 128) { | ||||
7204 | if (RetTy->isAnyComplexType()) | ||||
7205 | return ABIArgInfo::getDirect(); | ||||
7206 | |||||
7207 | // O32 returns integer vectors in registers and N32/N64 returns all small | ||||
7208 | // aggregates in registers. | ||||
7209 | if (!IsO32 || | ||||
7210 | (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { | ||||
7211 | ABIArgInfo ArgInfo = | ||||
7212 | ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); | ||||
7213 | ArgInfo.setInReg(true); | ||||
7214 | return ArgInfo; | ||||
7215 | } | ||||
7216 | } | ||||
7217 | |||||
7218 | return getNaturalAlignIndirect(RetTy); | ||||
7219 | } | ||||
7220 | |||||
7221 | // Treat an enum type as its underlying type. | ||||
7222 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
7223 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
7224 | |||||
7225 | if (RetTy->isPromotableIntegerType()) | ||||
7226 | return ABIArgInfo::getExtend(RetTy); | ||||
7227 | |||||
7228 | if ((RetTy->isUnsignedIntegerOrEnumerationType() || | ||||
7229 | RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32) | ||||
7230 | return ABIArgInfo::getSignExtend(RetTy); | ||||
7231 | |||||
7232 | return ABIArgInfo::getDirect(); | ||||
7233 | } | ||||
7234 | |||||
7235 | void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
7236 | ABIArgInfo &RetInfo = FI.getReturnInfo(); | ||||
7237 | if (!getCXXABI().classifyReturnType(FI)) | ||||
7238 | RetInfo = classifyReturnType(FI.getReturnType()); | ||||
7239 | |||||
7240 | // Check if a pointer to an aggregate is passed as a hidden argument. | ||||
7241 | uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; | ||||
7242 | |||||
7243 | for (auto &I : FI.arguments()) | ||||
7244 | I.info = classifyArgumentType(I.type, Offset); | ||||
7245 | } | ||||
7246 | |||||
7247 | Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
7248 | QualType OrigTy) const { | ||||
7249 | QualType Ty = OrigTy; | ||||
7250 | |||||
7251 | // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. | ||||
7252 | // Pointers are also promoted in the same way but this only matters for N32. | ||||
7253 | unsigned SlotSizeInBits = IsO32 ? 32 : 64; | ||||
7254 | unsigned PtrWidth = getTarget().getPointerWidth(0); | ||||
7255 | bool DidPromote = false; | ||||
7256 | if ((Ty->isIntegerType() && | ||||
7257 | getContext().getIntWidth(Ty) < SlotSizeInBits) || | ||||
7258 | (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { | ||||
7259 | DidPromote = true; | ||||
7260 | Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, | ||||
7261 | Ty->isSignedIntegerType()); | ||||
7262 | } | ||||
7263 | |||||
7264 | auto TyInfo = getContext().getTypeInfoInChars(Ty); | ||||
7265 | |||||
7266 | // The alignment of things in the argument area is never larger than | ||||
7267 | // StackAlignInBytes. | ||||
7268 | TyInfo.second = | ||||
7269 | std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes)); | ||||
7270 | |||||
7271 | // MinABIStackAlignInBytes is the size of argument slots on the stack. | ||||
7272 | CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes); | ||||
7273 | |||||
7274 | Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, | ||||
7275 | TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true); | ||||
7276 | |||||
7277 | |||||
7278 | // If there was a promotion, "unpromote" into a temporary. | ||||
7279 | // TODO: can we just use a pointer into a subset of the original slot? | ||||
7280 | if (DidPromote) { | ||||
7281 | Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp"); | ||||
7282 | llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr); | ||||
7283 | |||||
7284 | // Truncate down to the right width. | ||||
7285 | llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType() | ||||
7286 | : CGF.IntPtrTy); | ||||
7287 | llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy); | ||||
7288 | if (OrigTy->isPointerType()) | ||||
7289 | V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType()); | ||||
7290 | |||||
7291 | CGF.Builder.CreateStore(V, Temp); | ||||
7292 | Addr = Temp; | ||||
7293 | } | ||||
7294 | |||||
7295 | return Addr; | ||||
7296 | } | ||||
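// va_arg promotion sketch (added; hypothetical): on N64 the slot is 64 bits,
// so reading a sub-slot integer loads the promoted i64 slot first, truncates
// it back down, and stores the result into "vaarg.promotion-temp":
//
//   int i = va_arg(ap, int);   // slot load: i64; CreateTrunc to i32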
7297 | |||||
7298 | ABIArgInfo MipsABIInfo::extendType(QualType Ty) const { | ||||
7299 | int TySize = getContext().getTypeSize(Ty); | ||||
7300 | |||||
7301 | // The MIPS64 ABI requires unsigned 32-bit integers to be sign-extended. | ||||
7302 | if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) | ||||
7303 | return ABIArgInfo::getSignExtend(Ty); | ||||
7304 | |||||
7305 | return ABIArgInfo::getExtend(Ty); | ||||
7306 | } | ||||
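// Example (added): on N32/N64 a hypothetical 'unsigned int' argument
// (TySize == 32) is classified getSignExtend, matching the MIPS64 convention
// that 32-bit values live sign-extended in 64-bit registers; every other
// promotable integer gets the usual getExtend treatment.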
7307 | |||||
7308 | bool | ||||
7309 | MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
7310 | llvm::Value *Address) const { | ||||
7311 | // This information comes from gcc's implementation, which seems to be | ||||
7312 | // as canonical as it gets. | ||||
7313 | |||||
7314 | // Everything on MIPS is 4 bytes. Double-precision FP registers | ||||
7315 | // are aliased to pairs of single-precision FP registers. | ||||
7316 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); | ||||
7317 | |||||
7318 | // 0-31 are the general purpose registers, $0 - $31. | ||||
7319 | // 32-63 are the floating-point registers, $f0 - $f31. | ||||
7320 | // 64 and 65 are the multiply/divide registers, $hi and $lo. | ||||
7321 | // 66 is the (notional, I think) register for signal-handler return. | ||||
7322 | AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); | ||||
7323 | |||||
7324 | // 67-74 are the floating-point status registers, $fcc0 - $fcc7. | ||||
7325 | // They are one bit wide and ignored here. | ||||
7326 | |||||
7327 | // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. | ||||
7328 | // (coprocessor 1 is the FP unit) | ||||
7329 | // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. | ||||
7330 | // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. | ||||
7331 | // 176-181 are the DSP accumulator registers. | ||||
7332 | AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); | ||||
7333 | return false; | ||||
7334 | } | ||||
7335 | |||||
7336 | //===----------------------------------------------------------------------===// | ||||
7337 | // AVR ABI Implementation. | ||||
7338 | //===----------------------------------------------------------------------===// | ||||
7339 | |||||
7340 | namespace { | ||||
7341 | class AVRTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
7342 | public: | ||||
7343 | AVRTargetCodeGenInfo(CodeGenTypes &CGT) | ||||
7344 | : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { } | ||||
7345 | |||||
7346 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
7347 | CodeGen::CodeGenModule &CGM) const override { | ||||
7348 | if (GV->isDeclaration()) | ||||
7349 | return; | ||||
7350 | const auto *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
7351 | if (!FD) return; | ||||
7352 | auto *Fn = cast<llvm::Function>(GV); | ||||
7353 | |||||
7354 | if (FD->getAttr<AVRInterruptAttr>()) | ||||
7355 | Fn->addFnAttr("interrupt"); | ||||
7356 | |||||
7357 | if (FD->getAttr<AVRSignalAttr>()) | ||||
7358 | Fn->addFnAttr("signal"); | ||||
7359 | } | ||||
7360 | }; | ||||
7361 | } | ||||
7362 | |||||
7363 | //===----------------------------------------------------------------------===// | ||||
7364 | // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. | ||||
7365 | // Currently subclassed only to implement custom OpenCL C function attribute | ||||
7366 | // handling. | ||||
7367 | //===----------------------------------------------------------------------===// | ||||
7368 | |||||
7369 | namespace { | ||||
7370 | |||||
7371 | class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { | ||||
7372 | public: | ||||
7373 | TCETargetCodeGenInfo(CodeGenTypes &CGT) | ||||
7374 | : DefaultTargetCodeGenInfo(CGT) {} | ||||
7375 | |||||
7376 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
7377 | CodeGen::CodeGenModule &M) const override; | ||||
7378 | }; | ||||
7379 | |||||
7380 | void TCETargetCodeGenInfo::setTargetAttributes( | ||||
7381 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { | ||||
7382 | if (GV->isDeclaration()) | ||||
7383 | return; | ||||
7384 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
7385 | if (!FD) return; | ||||
7386 | |||||
7387 | llvm::Function *F = cast<llvm::Function>(GV); | ||||
7388 | |||||
7389 | if (M.getLangOpts().OpenCL) { | ||||
7390 | if (FD->hasAttr<OpenCLKernelAttr>()) { | ||||
7391 | // OpenCL C Kernel functions are not subject to inlining | ||||
7392 | F->addFnAttr(llvm::Attribute::NoInline); | ||||
7393 | const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); | ||||
7394 | if (Attr) { | ||||
7395 | // Convert the reqd_work_group_size() attributes to metadata. | ||||
7396 | llvm::LLVMContext &Context = F->getContext(); | ||||
7397 | llvm::NamedMDNode *OpenCLMetadata = | ||||
7398 | M.getModule().getOrInsertNamedMetadata( | ||||
7399 | "opencl.kernel_wg_size_info"); | ||||
7400 | |||||
7401 | SmallVector<llvm::Metadata *, 5> Operands; | ||||
7402 | Operands.push_back(llvm::ConstantAsMetadata::get(F)); | ||||
7403 | |||||
7404 | Operands.push_back( | ||||
7405 | llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( | ||||
7406 | M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); | ||||
7407 | Operands.push_back( | ||||
7408 | llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( | ||||
7409 | M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); | ||||
7410 | Operands.push_back( | ||||
7411 | llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( | ||||
7412 | M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); | ||||
7413 | |||||
7414 | // Add a boolean constant operand for "required" (true) or "hint" | ||||
7415 | // (false) for implementing the work_group_size_hint attr later. | ||||
7416 | // Currently always true as the hint is not yet implemented. | ||||
7417 | Operands.push_back( | ||||
7418 | llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); | ||||
7419 | OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); | ||||
7420 | } | ||||
7421 | } | ||||
7422 | } | ||||
7423 | } | ||||
7424 | |||||
7425 | } | ||||
7426 | |||||
7427 | //===----------------------------------------------------------------------===// | ||||
7428 | // Hexagon ABI Implementation | ||||
7429 | //===----------------------------------------------------------------------===// | ||||
7430 | |||||
7431 | namespace { | ||||
7432 | |||||
7433 | class HexagonABIInfo : public ABIInfo { | ||||
7434 | |||||
7435 | |||||
7436 | public: | ||||
7437 | HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} | ||||
7438 | |||||
7439 | private: | ||||
7440 | |||||
7441 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
7442 | ABIArgInfo classifyArgumentType(QualType Ty) const; | ||||
7443 | |||||
7444 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
7445 | |||||
7446 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
7447 | QualType Ty) const override; | ||||
7448 | }; | ||||
7449 | |||||
7450 | class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
7451 | public: | ||||
7452 | HexagonTargetCodeGenInfo(CodeGenTypes &CGT) | ||||
7453 | :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} | ||||
7454 | |||||
7455 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
7456 | return 29; | ||||
7457 | } | ||||
7458 | }; | ||||
7459 | |||||
7460 | } | ||||
7461 | |||||
7462 | void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
7463 | if (!getCXXABI().classifyReturnType(FI)) | ||||
7464 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
7465 | for (auto &I : FI.arguments()) | ||||
7466 | I.info = classifyArgumentType(I.type); | ||||
7467 | } | ||||
7468 | |||||
7469 | ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { | ||||
7470 | if (!isAggregateTypeForABI(Ty)) { | ||||
7471 | // Treat an enum type as its underlying type. | ||||
7472 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
7473 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
7474 | |||||
7475 | return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) | ||||
7476 | : ABIArgInfo::getDirect()); | ||||
7477 | } | ||||
7478 | |||||
7479 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
7480 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
7481 | |||||
7482 | // Ignore empty records. | ||||
7483 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
7484 | return ABIArgInfo::getIgnore(); | ||||
7485 | |||||
7486 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
7487 | if (Size > 64) | ||||
7488 | return getNaturalAlignIndirect(Ty, /*ByVal=*/true); | ||||
7489 | // Pass in the smallest viable integer type. | ||||
7490 | else if (Size > 32) | ||||
7491 | return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); | ||||
7492 | else if (Size > 16) | ||||
7493 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
7494 | else if (Size > 8) | ||||
7495 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); | ||||
7496 | else | ||||
7497 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); | ||||
7498 | } | ||||
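// Size-bucket illustration (added; hypothetical aggregates): arguments map to
// the smallest integer type that covers them, e.g.
//
//   struct A { char c; };          //   8 bits -> i8
//   struct B { char c[3]; };       //  24 bits -> i32
//   struct C { int i; char c; };   //  64 bits (with padding) -> i64
//   struct D { long l; char c; };  // 128 bits -> indirect, ByVal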
7499 | |||||
7500 | ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { | ||||
7501 | if (RetTy->isVoidType()) | ||||
7502 | return ABIArgInfo::getIgnore(); | ||||
7503 | |||||
7504 | // Large vector types should be returned via memory. | ||||
7505 | if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) | ||||
7506 | return getNaturalAlignIndirect(RetTy); | ||||
7507 | |||||
7508 | if (!isAggregateTypeForABI(RetTy)) { | ||||
7509 | // Treat an enum type as its underlying type. | ||||
7510 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) | ||||
7511 | RetTy = EnumTy->getDecl()->getIntegerType(); | ||||
7512 | |||||
7513 | return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) | ||||
7514 | : ABIArgInfo::getDirect()); | ||||
7515 | } | ||||
7516 | |||||
7517 | if (isEmptyRecord(getContext(), RetTy, true)) | ||||
7518 | return ABIArgInfo::getIgnore(); | ||||
7519 | |||||
7520 | // Aggregates <= 8 bytes are returned in r0; other aggregates | ||||
7521 | // are returned indirectly. | ||||
7522 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
7523 | if (Size <= 64) { | ||||
7524 | // Return in the smallest viable integer type. | ||||
7525 | if (Size <= 8) | ||||
7526 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); | ||||
7527 | if (Size <= 16) | ||||
7528 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); | ||||
7529 | if (Size <= 32) | ||||
7530 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
7531 | return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); | ||||
7532 | } | ||||
7533 | |||||
7534 | return getNaturalAlignIndirect(RetTy, /*ByVal=*/true); | ||||
7535 | } | ||||
7536 | |||||
7537 | Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
7538 | QualType Ty) const { | ||||
7539 | // FIXME: Someone needs to audit that this handles alignment correctly. | ||||
7540 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, | ||||
7541 | getContext().getTypeInfoInChars(Ty), | ||||
7542 | CharUnits::fromQuantity(4), | ||||
7543 | /*AllowHigherAlign*/ true); | ||||
7544 | } | ||||
7545 | |||||
7546 | //===----------------------------------------------------------------------===// | ||||
7547 | // Lanai ABI Implementation | ||||
7548 | //===----------------------------------------------------------------------===// | ||||
7549 | |||||
7550 | namespace { | ||||
7551 | class LanaiABIInfo : public DefaultABIInfo { | ||||
7552 | public: | ||||
7553 | LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} | ||||
7554 | |||||
7555 | bool shouldUseInReg(QualType Ty, CCState &State) const; | ||||
7556 | |||||
7557 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
7558 | CCState State(FI.getCallingConvention()); | ||||
7559 | // Lanai uses 4 registers to pass arguments unless the function has the | ||||
7560 | // regparm attribute set. | ||||
7561 | if (FI.getHasRegParm()) { | ||||
7562 | State.FreeRegs = FI.getRegParm(); | ||||
7563 | } else { | ||||
7564 | State.FreeRegs = 4; | ||||
7565 | } | ||||
7566 | |||||
7567 | if (!getCXXABI().classifyReturnType(FI)) | ||||
7568 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
7569 | for (auto &I : FI.arguments()) | ||||
7570 | I.info = classifyArgumentType(I.type, State); | ||||
7571 | } | ||||
7572 | |||||
7573 | ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; | ||||
7574 | ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const; | ||||
7575 | }; | ||||
7576 | } // end anonymous namespace | ||||
7577 | |||||
7578 | bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const { | ||||
7579 | unsigned Size = getContext().getTypeSize(Ty); | ||||
7580 | unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U; | ||||
7581 | |||||
7582 | if (SizeInRegs == 0) | ||||
7583 | return false; | ||||
7584 | |||||
7585 | if (SizeInRegs > State.FreeRegs) { | ||||
7586 | State.FreeRegs = 0; | ||||
7587 | return false; | ||||
7588 | } | ||||
7589 | |||||
7590 | State.FreeRegs -= SizeInRegs; | ||||
7591 | |||||
7592 | return true; | ||||
7593 | } | ||||
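// Register accounting sketch (illustrative): with the default 4 free
// registers, a hypothetical sequence i32, i64, i64 consumes 1 + 2 registers
// and then fails for the third argument, zeroing FreeRegs so later arguments
// also go on the stack:
//
//   shouldUseInReg(i32, State);   // true,  FreeRegs 4 -> 3
//   shouldUseInReg(i64, State);   // true,  FreeRegs 3 -> 1
//   shouldUseInReg(i64, State);   // false, FreeRegs 1 -> 0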
7594 | |||||
7595 | ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal, | ||||
7596 | CCState &State) const { | ||||
7597 | if (!ByVal) { | ||||
7598 | if (State.FreeRegs) { | ||||
7599 | --State.FreeRegs; // Non-byval indirects just use one pointer. | ||||
7600 | return getNaturalAlignIndirectInReg(Ty); | ||||
7601 | } | ||||
7602 | return getNaturalAlignIndirect(Ty, false); | ||||
7603 | } | ||||
7604 | |||||
7605 | // Compute the byval alignment. | ||||
7606 | const unsigned MinABIStackAlignInBytes = 4; | ||||
7607 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; | ||||
7608 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, | ||||
7609 | /*Realign=*/TypeAlign > | ||||
7610 | MinABIStackAlignInBytes); | ||||
7611 | } | ||||
7612 | |||||
7613 | ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty, | ||||
7614 | CCState &State) const { | ||||
7615 | // Check with the C++ ABI first. | ||||
7616 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
7617 | if (RT) { | ||||
7618 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); | ||||
7619 | if (RAA == CGCXXABI::RAA_Indirect) { | ||||
7620 | return getIndirectResult(Ty, /*ByVal=*/false, State); | ||||
7621 | } else if (RAA == CGCXXABI::RAA_DirectInMemory) { | ||||
7622 | return getNaturalAlignIndirect(Ty, /*ByVal=*/true); | ||||
7623 | } | ||||
7624 | } | ||||
7625 | |||||
7626 | if (isAggregateTypeForABI(Ty)) { | ||||
7627 | // Structures with flexible arrays are always indirect. | ||||
7628 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) | ||||
7629 | return getIndirectResult(Ty, /*ByVal=*/true, State); | ||||
7630 | |||||
7631 | // Ignore empty structs/unions. | ||||
7632 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
7633 | return ABIArgInfo::getIgnore(); | ||||
7634 | |||||
7635 | llvm::LLVMContext &LLVMContext = getVMContext(); | ||||
7636 | unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; | ||||
7637 | if (SizeInRegs <= State.FreeRegs) { | ||||
7638 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); | ||||
7639 | SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); | ||||
7640 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); | ||||
7641 | State.FreeRegs -= SizeInRegs; | ||||
7642 | return ABIArgInfo::getDirectInReg(Result); | ||||
7643 | } else { | ||||
7644 | State.FreeRegs = 0; | ||||
7645 | } | ||||
7646 | return getIndirectResult(Ty, true, State); | ||||
7647 | } | ||||
7648 | |||||
7649 | // Treat an enum type as its underlying type. | ||||
7650 | if (const auto *EnumTy = Ty->getAs<EnumType>()) | ||||
7651 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
7652 | |||||
7653 | bool InReg = shouldUseInReg(Ty, State); | ||||
7654 | if (Ty->isPromotableIntegerType()) { | ||||
7655 | if (InReg) | ||||
7656 | return ABIArgInfo::getDirectInReg(); | ||||
7657 | return ABIArgInfo::getExtend(Ty); | ||||
7658 | } | ||||
7659 | if (InReg) | ||||
7660 | return ABIArgInfo::getDirectInReg(); | ||||
7661 | return ABIArgInfo::getDirect(); | ||||
7662 | } | ||||
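// Example (added; hypothetical type): with 4 free registers, a
// 'struct { int a, b, c; }' (SizeInRegs == 3) fits and is passed as
// getDirectInReg({i32, i32, i32}); a 5-word struct exhausts the budget and
// falls back to getIndirectResult(Ty, /*ByVal=*/true, State).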
7663 | |||||
7664 | namespace { | ||||
7665 | class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
7666 | public: | ||||
7667 | LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) | ||||
7668 | : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {} | ||||
7669 | }; | ||||
7670 | } | ||||
7671 | |||||
7672 | //===----------------------------------------------------------------------===// | ||||
7673 | // AMDGPU ABI Implementation | ||||
7674 | //===----------------------------------------------------------------------===// | ||||
7675 | |||||
7676 | namespace { | ||||
7677 | |||||
7678 | class AMDGPUABIInfo final : public DefaultABIInfo { | ||||
7679 | private: | ||||
7680 | static const unsigned MaxNumRegsForArgsRet = 16; | ||||
7681 | |||||
7682 | unsigned numRegsForType(QualType Ty) const; | ||||
7683 | |||||
7684 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; | ||||
7685 | bool isHomogeneousAggregateSmallEnough(const Type *Base, | ||||
7686 | uint64_t Members) const override; | ||||
7687 | |||||
7688 | public: | ||||
7689 | explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : | ||||
7690 | DefaultABIInfo(CGT) {} | ||||
7691 | |||||
7692 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
7693 | ABIArgInfo classifyKernelArgumentType(QualType Ty) const; | ||||
7694 | ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; | ||||
7695 | |||||
7696 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
7697 | }; | ||||
7698 | |||||
7699 | bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { | ||||
7700 | return true; | ||||
7701 | } | ||||
7702 | |||||
7703 | bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( | ||||
7704 | const Type *Base, uint64_t Members) const { | ||||
7705 | uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32; | ||||
7706 | |||||
7707 | // Homogeneous Aggregates may occupy at most 16 registers. | ||||
7708 | return Members * NumRegs <= MaxNumRegsForArgsRet; | ||||
7709 | } | ||||
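// Quick arithmetic check (illustrative): a float base type needs one 32-bit
// register, so up to 16 float members qualify; a double needs two, capping a
// homogeneous aggregate of doubles at 8 members (8 * 2 == MaxNumRegsForArgsRet).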
7710 | |||||
7711 | /// Estimate number of registers the type will use when passed in registers. | ||||
7712 | unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const { | ||||
7713 | unsigned NumRegs = 0; | ||||
7714 | |||||
7715 | if (const VectorType *VT = Ty->getAs<VectorType>()) { | ||||
7716 | // Compute from the number of elements. The reported size is based on the | ||||
7717 | // in-memory size, which includes the padding 4th element for 3-vectors. | ||||
7718 | QualType EltTy = VT->getElementType(); | ||||
7719 | unsigned EltSize = getContext().getTypeSize(EltTy); | ||||
7720 | |||||
7721 | // 16-bit element vectors should be passed as packed. | ||||
7722 | if (EltSize == 16) | ||||
7723 | return (VT->getNumElements() + 1) / 2; | ||||
7724 | |||||
7725 | unsigned EltNumRegs = (EltSize + 31) / 32; | ||||
7726 | return EltNumRegs * VT->getNumElements(); | ||||
7727 | } | ||||
7728 | |||||
7729 | if (const RecordType *RT = Ty->getAs<RecordType>()) { | ||||
7730 | const RecordDecl *RD = RT->getDecl(); | ||||
7731 | assert(!RD->hasFlexibleArrayMember()); | ||||
7732 | |||||
7733 | for (const FieldDecl *Field : RD->fields()) { | ||||
7734 | QualType FieldTy = Field->getType(); | ||||
7735 | NumRegs += numRegsForType(FieldTy); | ||||
7736 | } | ||||
7737 | |||||
7738 | return NumRegs; | ||||
7739 | } | ||||
7740 | |||||
7741 | return (getContext().getTypeSize(Ty) + 31) / 32; | ||||
7742 | } | ||||
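// Estimation examples (added for illustration, assuming OpenCL-style vectors):
//
//   half8  (8 x 16-bit) -> (8 + 1) / 2 == 4 registers (packed)
//   float3 (3 x 32-bit) -> 3 registers (the padding 4th element is not counted)
//   double2             -> 2 * 2 == 4 registers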
7743 | |||||
7744 | void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
7745 | llvm::CallingConv::ID CC = FI.getCallingConvention(); | ||||
7746 | |||||
7747 | if (!getCXXABI().classifyReturnType(FI)) | ||||
7748 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
7749 | |||||
7750 | unsigned NumRegsLeft = MaxNumRegsForArgsRet; | ||||
7751 | for (auto &Arg : FI.arguments()) { | ||||
7752 | if (CC == llvm::CallingConv::AMDGPU_KERNEL) { | ||||
7753 | Arg.info = classifyKernelArgumentType(Arg.type); | ||||
7754 | } else { | ||||
7755 | Arg.info = classifyArgumentType(Arg.type, NumRegsLeft); | ||||
7756 | } | ||||
7757 | } | ||||
7758 | } | ||||
7759 | |||||
7760 | ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const { | ||||
7761 | if (isAggregateTypeForABI(RetTy)) { | ||||
7762 | // Records with non-trivial destructors/copy-constructors should not be | ||||
7763 | // returned by value. | ||||
7764 | if (!getRecordArgABI(RetTy, getCXXABI())) { | ||||
7765 | // Ignore empty structs/unions. | ||||
7766 | if (isEmptyRecord(getContext(), RetTy, true)) | ||||
7767 | return ABIArgInfo::getIgnore(); | ||||
7768 | |||||
7769 | // Lower single-element structs to just return a regular value. | ||||
7770 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) | ||||
7771 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); | ||||
7772 | |||||
7773 | if (const RecordType *RT = RetTy->getAs<RecordType>()) { | ||||
7774 | const RecordDecl *RD = RT->getDecl(); | ||||
7775 | if (RD->hasFlexibleArrayMember()) | ||||
7776 | return DefaultABIInfo::classifyReturnType(RetTy); | ||||
7777 | } | ||||
7778 | |||||
7779 | // Pack aggregates <= 8 bytes into a single VGPR or a pair. | ||||
7780 | uint64_t Size = getContext().getTypeSize(RetTy); | ||||
7781 | if (Size <= 16) | ||||
7782 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); | ||||
7783 | |||||
7784 | if (Size <= 32) | ||||
7785 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
7786 | |||||
7787 | if (Size <= 64) { | ||||
7788 | llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); | ||||
7789 | return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); | ||||
7790 | } | ||||
7791 | |||||
7792 | if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet) | ||||
7793 | return ABIArgInfo::getDirect(); | ||||
7794 | } | ||||
7795 | } | ||||
7796 | |||||
7797 | // Otherwise just do the default thing. | ||||
7798 | return DefaultABIInfo::classifyReturnType(RetTy); | ||||
7799 | } | ||||
7800 | |||||
7801 | /// For kernels, all parameters are really passed in a special buffer. It doesn't | ||||
7802 | /// make sense to pass anything byval, so everything must be direct. | ||||
7803 | ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const { | ||||
7804 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
7805 | |||||
7806 | // TODO: Can we omit empty structs? | ||||
7807 | |||||
7808 | // Coerce single-element structs to their element type. | ||||
7809 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) | ||||
7810 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); | ||||
7811 | |||||
7812 | // If we set CanBeFlattened to true, CodeGen will expand the struct to its | ||||
7813 | // individual elements, which confuses the Clover OpenCL backend; therefore we | ||||
7814 | // have to set it to false here. Other args of getDirect() are just defaults. | ||||
7815 | return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); | ||||
7816 | } | ||||
7817 | |||||
7818 | ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, | ||||
7819 | unsigned &NumRegsLeft) const { | ||||
7820 | assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow"); | ||||
7821 | |||||
7822 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
7823 | |||||
7824 | if (isAggregateTypeForABI(Ty)) { | ||||
7825 | // Records with non-trivial destructors/copy-constructors should not be | ||||
7826 | // passed by value. | ||||
7827 | if (auto RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
7828 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
7829 | |||||
7830 | // Ignore empty structs/unions. | ||||
7831 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
7832 | return ABIArgInfo::getIgnore(); | ||||
7833 | |||||
7834 | // Lower single-element structs to just pass a regular value. TODO: We | ||||
7835 | // could do reasonable-size multiple-element structs too, using getExpand(), | ||||
7836 | // though watch out for things like bitfields. | ||||
7837 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) | ||||
7838 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); | ||||
7839 | |||||
7840 | if (const RecordType *RT = Ty->getAs<RecordType>()) { | ||||
7841 | const RecordDecl *RD = RT->getDecl(); | ||||
7842 | if (RD->hasFlexibleArrayMember()) | ||||
7843 | return DefaultABIInfo::classifyArgumentType(Ty); | ||||
7844 | } | ||||
7845 | |||||
7846 | // Pack aggregates <= 8 bytes into single VGPR or pair. | ||||
7847 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
7848 | if (Size <= 64) { | ||||
7849 | unsigned NumRegs = (Size + 31) / 32; | ||||
7850 | NumRegsLeft -= std::min(NumRegsLeft, NumRegs); | ||||
7851 | |||||
7852 | if (Size <= 16) | ||||
7853 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); | ||||
7854 | |||||
7855 | if (Size <= 32) | ||||
7856 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); | ||||
7857 | |||||
7858 | // XXX: Should this be i64 instead, and should the limit increase? | ||||
7859 | llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); | ||||
7860 | return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); | ||||
7861 | } | ||||
7862 | |||||
7863 | if (NumRegsLeft > 0) { | ||||
7864 | unsigned NumRegs = numRegsForType(Ty); | ||||
7865 | if (NumRegsLeft >= NumRegs) { | ||||
7866 | NumRegsLeft -= NumRegs; | ||||
7867 | return ABIArgInfo::getDirect(); | ||||
7868 | } | ||||
7869 | } | ||||
7870 | } | ||||
7871 | |||||
7872 | // Otherwise just do the default thing. | ||||
7873 | ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); | ||||
7874 | if (!ArgInfo.isIndirect()) { | ||||
7875 | unsigned NumRegs = numRegsForType(Ty); | ||||
7876 | NumRegsLeft -= std::min(NumRegs, NumRegsLeft); | ||||
7877 | } | ||||
7878 | |||||
7879 | return ArgInfo; | ||||
7880 | } | ||||
7881 | |||||
7882 | class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
7883 | public: | ||||
7884 | AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) | ||||
7885 | : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {} | ||||
7886 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
7887 | CodeGen::CodeGenModule &M) const override; | ||||
7888 | unsigned getOpenCLKernelCallingConv() const override; | ||||
7889 | |||||
7890 | llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, | ||||
7891 | llvm::PointerType *T, QualType QT) const override; | ||||
7892 | |||||
7893 | LangAS getASTAllocaAddressSpace() const override { | ||||
7894 | return getLangASFromTargetAS( | ||||
7895 | getABIInfo().getDataLayout().getAllocaAddrSpace()); | ||||
7896 | } | ||||
7897 | LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, | ||||
7898 | const VarDecl *D) const override; | ||||
7899 | llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, | ||||
7900 | SyncScope Scope, | ||||
7901 | llvm::AtomicOrdering Ordering, | ||||
7902 | llvm::LLVMContext &Ctx) const override; | ||||
7903 | llvm::Function * | ||||
7904 | createEnqueuedBlockKernel(CodeGenFunction &CGF, | ||||
7905 | llvm::Function *BlockInvokeFunc, | ||||
7906 | llvm::Value *BlockLiteral) const override; | ||||
7907 | bool shouldEmitStaticExternCAliases() const override; | ||||
7908 | void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; | ||||
7909 | }; | ||||
7910 | } | ||||
7911 | |||||
7912 | static bool requiresAMDGPUProtectedVisibility(const Decl *D, | ||||
7913 | llvm::GlobalValue *GV) { | ||||
7914 | if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) | ||||
7915 | return false; | ||||
7916 | |||||
7917 | return D->hasAttr<OpenCLKernelAttr>() || | ||||
7918 | (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) || | ||||
7919 | (isa<VarDecl>(D) && | ||||
7920 | (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || | ||||
7921 | D->hasAttr<HIPPinnedShadowAttr>())); | ||||
7922 | } | ||||
7923 | |||||
7924 | static bool requiresAMDGPUDefaultVisibility(const Decl *D, | ||||
7925 | llvm::GlobalValue *GV) { | ||||
7926 | if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) | ||||
7927 | return false; | ||||
7928 | |||||
7929 | return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>(); | ||||
7930 | } | ||||
7931 | |||||
7932 | void AMDGPUTargetCodeGenInfo::setTargetAttributes( | ||||
7933 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { | ||||
7934 | if (requiresAMDGPUDefaultVisibility(D, GV)) { | ||||
7935 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); | ||||
7936 | GV->setDSOLocal(false); | ||||
7937 | } else if (requiresAMDGPUProtectedVisibility(D, GV)) { | ||||
7938 | GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); | ||||
7939 | GV->setDSOLocal(true); | ||||
7940 | } | ||||
7941 | |||||
7942 | if (GV->isDeclaration()) | ||||
7943 | return; | ||||
7944 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
7945 | if (!FD) | ||||
7946 | return; | ||||
7947 | |||||
7948 | llvm::Function *F = cast<llvm::Function>(GV); | ||||
7949 | |||||
7950 | const auto *ReqdWGS = M.getLangOpts().OpenCL ? | ||||
7951 | FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; | ||||
7952 | |||||
7953 | |||||
7954 | const bool IsOpenCLKernel = M.getLangOpts().OpenCL && | ||||
7955 | FD->hasAttr<OpenCLKernelAttr>(); | ||||
7956 | const bool IsHIPKernel = M.getLangOpts().HIP && | ||||
7957 | FD->hasAttr<CUDAGlobalAttr>(); | ||||
7958 | if ((IsOpenCLKernel || IsHIPKernel) && | ||||
7959 | (M.getTriple().getOS() == llvm::Triple::AMDHSA)) | ||||
7960 | F->addFnAttr("amdgpu-implicitarg-num-bytes", "56"); | ||||
7961 | |||||
7962 | const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); | ||||
7963 | if (ReqdWGS || FlatWGS) { | ||||
7964 | unsigned Min = 0; | ||||
7965 | unsigned Max = 0; | ||||
7966 | if (FlatWGS) { | ||||
7967 | Min = FlatWGS->getMin() | ||||
7968 | ->EvaluateKnownConstInt(M.getContext()) | ||||
7969 | .getExtValue(); | ||||
7970 | Max = FlatWGS->getMax() | ||||
7971 | ->EvaluateKnownConstInt(M.getContext()) | ||||
7972 | .getExtValue(); | ||||
7973 | } | ||||
7974 | if (ReqdWGS && Min == 0 && Max == 0) | ||||
7975 | Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); | ||||
7976 | |||||
7977 | if (Min != 0) { | ||||
7978 | assert(Min <= Max && "Min must be less than or equal Max"); | ||||
7979 | |||||
7980 | std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); | ||||
7981 | F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); | ||||
7982 | } else | ||||
7983 | assert(Max == 0 && "Max must be zero"); | ||||
7984 | } else if (IsOpenCLKernel || IsHIPKernel) { | ||||
7985 | // By default, restrict the maximum flat work-group size to 256. | ||||
7986 | F->addFnAttr("amdgpu-flat-work-group-size", "1,256"); | ||||
7987 | } | ||||
7988 | |||||
7989 | if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { | ||||
7990 | unsigned Min = | ||||
7991 | Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue(); | ||||
7992 | unsigned Max = Attr->getMax() ? Attr->getMax() | ||||
7993 | ->EvaluateKnownConstInt(M.getContext()) | ||||
7994 | .getExtValue() | ||||
7995 | : 0; | ||||
7996 | |||||
7997 | if (Min != 0) { | ||||
7998 | assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); | ||||
7999 | |||||
8000 | std::string AttrVal = llvm::utostr(Min); | ||||
8001 | if (Max != 0) | ||||
8002 | AttrVal = AttrVal + "," + llvm::utostr(Max); | ||||
8003 | F->addFnAttr("amdgpu-waves-per-eu", AttrVal); | ||||
8004 | } else | ||||
8005 | assert(Max == 0 && "Max must be zero"); | ||||
8006 | } | ||||
8007 | |||||
8008 | if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { | ||||
8009 | unsigned NumSGPR = Attr->getNumSGPR(); | ||||
8010 | |||||
8011 | if (NumSGPR != 0) | ||||
8012 | F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); | ||||
8013 | } | ||||
8014 | |||||
8015 | if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { | ||||
8016 | uint32_t NumVGPR = Attr->getNumVGPR(); | ||||
8017 | |||||
8018 | if (NumVGPR != 0) | ||||
8019 | F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); | ||||
8020 | } | ||||
8021 | } | ||||
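// Resulting attribute sketch (hypothetical, assuming an OpenCL kernel with
// reqd_work_group_size(8, 8, 4) on amdhsa): Min == Max == 8 * 8 * 4 == 256,
// so the function would carry attributes along the lines of
//
//   "amdgpu-implicitarg-num-bytes"="56" "amdgpu-flat-work-group-size"="256,256"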
8022 | |||||
8023 | unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { | ||||
8024 | return llvm::CallingConv::AMDGPU_KERNEL; | ||||
8025 | } | ||||
8026 | |||||
8027 | // Currently LLVM assumes null pointers always have value 0, | ||||
8028 | // which results in incorrectly transformed IR. Therefore, instead of | ||||
8029 | // emitting null pointers in the private and local address spaces, a null | ||||
8030 | // pointer in the generic address space is emitted and then cast to a | ||||
8031 | // pointer in the local or private address space. | ||||
8032 | llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer( | ||||
8033 | const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT, | ||||
8034 | QualType QT) const { | ||||
8035 | if (CGM.getContext().getTargetNullPointerValue(QT) == 0) | ||||
8036 | return llvm::ConstantPointerNull::get(PT); | ||||
8037 | |||||
8038 | auto &Ctx = CGM.getContext(); | ||||
8039 | auto NPT = llvm::PointerType::get(PT->getElementType(), | ||||
8040 | Ctx.getTargetAddressSpace(LangAS::opencl_generic)); | ||||
8041 | return llvm::ConstantExpr::getAddrSpaceCast( | ||||
8042 | llvm::ConstantPointerNull::get(NPT), PT); | ||||
8043 | } | ||||
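// Illustrative IR (added; address-space numbers are assumptions, not taken
// from this source): for a private-AS pointer whose target null value is
// nonzero, the constant becomes an addrspacecast of the generic null rather
// than a literal zero, e.g.
//
//   addrspacecast (i8* null to i8 addrspace(5)*)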
8044 | |||||
8045 | LangAS | ||||
8046 | AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, | ||||
8047 | const VarDecl *D) const { | ||||
8048 | assert(!CGM.getLangOpts().OpenCL && | ||||
8049 | !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && | ||||
8050 | "Address space agnostic languages only"); | ||||
8051 | LangAS DefaultGlobalAS = getLangASFromTargetAS( | ||||
8052 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_global)); | ||||
8053 | if (!D) | ||||
8054 | return DefaultGlobalAS; | ||||
8055 | |||||
8056 | LangAS AddrSpace = D->getType().getAddressSpace(); | ||||
8057 | assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace)); | ||||
8058 | if (AddrSpace != LangAS::Default) | ||||
8059 | return AddrSpace; | ||||
8060 | |||||
8061 | if (CGM.isTypeConstant(D->getType(), false)) { | ||||
8062 | if (auto ConstAS = CGM.getTarget().getConstantAddressSpace()) | ||||
8063 | return ConstAS.getValue(); | ||||
8064 | } | ||||
8065 | return DefaultGlobalAS; | ||||
8066 | } | ||||
8067 | |||||
8068 | llvm::SyncScope::ID | ||||
8069 | AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, | ||||
8070 | SyncScope Scope, | ||||
8071 | llvm::AtomicOrdering Ordering, | ||||
8072 | llvm::LLVMContext &Ctx) const { | ||||
8073 | std::string Name; | ||||
8074 | switch (Scope) { | ||||
8075 | case SyncScope::OpenCLWorkGroup: | ||||
8076 | Name = "workgroup"; | ||||
8077 | break; | ||||
8078 | case SyncScope::OpenCLDevice: | ||||
8079 | Name = "agent"; | ||||
8080 | break; | ||||
8081 | case SyncScope::OpenCLAllSVMDevices: | ||||
8082 | Name = ""; | ||||
8083 | break; | ||||
8084 | case SyncScope::OpenCLSubGroup: | ||||
8085 | Name = "wavefront"; | ||||
8086 | } | ||||
8087 | |||||
8088 | if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) { | ||||
8089 | if (!Name.empty()) | ||||
8090 | Name = Twine(Twine(Name) + Twine("-")).str(); | ||||
8091 | |||||
8092 | Name = Twine(Twine(Name) + Twine("one-as")).str(); | ||||
8093 | } | ||||
8094 | |||||
8095 | return Ctx.getOrInsertSyncScopeID(Name); | ||||
8096 | } | ||||
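// For example, an OpenCL work-group scoped atomic with a relaxed memory
// order maps to syncscope("workgroup-one-as"), while the same operation
// with memory_order_seq_cst maps to syncscope("workgroup"), e.g.
//
//   %old = atomicrmw add i32* %p, i32 1 syncscope("workgroup-one-as") monotonic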
8097 | |||||
8098 | bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { | ||||
8099 | return false; | ||||
8100 | } | ||||
8101 | |||||
8102 | void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention( | ||||
8103 | const FunctionType *&FT) const { | ||||
8104 | FT = getABIInfo().getContext().adjustFunctionType( | ||||
8105 | FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); | ||||
8106 | } | ||||
8107 | |||||
8108 | //===----------------------------------------------------------------------===// | ||||
8109 | // SPARC v8 ABI Implementation. | ||||
8110 | // Based on the SPARC Compliance Definition version 2.4.1. | ||||
8111 | // | ||||
8112 | // Ensures that complex values are passed in registers. | ||||
8113 | // | ||||
8114 | namespace { | ||||
8115 | class SparcV8ABIInfo : public DefaultABIInfo { | ||||
8116 | public: | ||||
8117 | SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} | ||||
8118 | |||||
8119 | private: | ||||
8120 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
8121 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
8122 | }; | ||||
8123 | } // end anonymous namespace | ||||
8124 | |||||
8125 | |||||
8126 | ABIArgInfo | ||||
8127 | SparcV8ABIInfo::classifyReturnType(QualType Ty) const { | ||||
8128 | if (Ty->isAnyComplexType()) { | ||||
8129 | return ABIArgInfo::getDirect(); | ||||
8130 | } | ||||
8131 | else { | ||||
8132 | return DefaultABIInfo::classifyReturnType(Ty); | ||||
8133 | } | ||||
8134 | } | ||||
8135 | |||||
8136 | void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
8137 | |||||
8138 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
8139 | for (auto &Arg : FI.arguments()) | ||||
8140 | Arg.info = classifyArgumentType(Arg.type); | ||||
8141 | } | ||||
8142 | |||||
8143 | namespace { | ||||
8144 | class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
8145 | public: | ||||
8146 | SparcV8TargetCodeGenInfo(CodeGenTypes &CGT) | ||||
8147 | : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {} | ||||
8148 | }; | ||||
8149 | } // end anonymous namespace | ||||
8150 | |||||
8151 | //===----------------------------------------------------------------------===// | ||||
8152 | // SPARC v9 ABI Implementation. | ||||
8153 | // Based on the SPARC Compliance Definition version 2.4.1. | ||||
8154 | // | ||||
8155 | // Function arguments are mapped to a nominal "parameter array" and promoted | ||||
8156 | // to registers depending on their type. Each argument occupies 8 or 16 bytes | ||||
8157 | // in the array; structs larger than 16 bytes are passed indirectly. | ||||
8158 | // | ||||
8159 | // One case requires special care: | ||||
8160 | // | ||||
8161 | // struct mixed { | ||||
8162 | // int i; | ||||
8163 | // float f; | ||||
8164 | // }; | ||||
8165 | // | ||||
8166 | // When a struct mixed is passed by value, it only occupies 8 bytes in the | ||||
8167 | // parameter array, but the int is passed in an integer register, and the float | ||||
8168 | // is passed in a floating point register. This is represented as two arguments | ||||
8169 | // with the LLVM IR inreg attribute: | ||||
8170 | // | ||||
8171 | // declare void f(i32 inreg %i, float inreg %f) | ||||
8172 | // | ||||
8173 | // The code generator will only allocate 4 bytes from the parameter array for | ||||
8174 | // the inreg arguments. All other arguments are allocated a multiple of 8 | ||||
8175 | // bytes. | ||||
8176 | // | ||||
8177 | namespace { | ||||
8178 | class SparcV9ABIInfo : public ABIInfo { | ||||
8179 | public: | ||||
8180 | SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} | ||||
8181 | |||||
8182 | private: | ||||
8183 | ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; | ||||
8184 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
8185 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
8186 | QualType Ty) const override; | ||||
8187 | |||||
8188 | // Coercion type builder for structs passed in registers. The coercion type | ||||
8189 | // serves two purposes: | ||||
8190 | // | ||||
8191 | // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' | ||||
8192 | // in registers. | ||||
8193 | // 2. Expose aligned floating point elements as first-level elements, so the | ||||
8194 | // code generator knows to pass them in floating point registers. | ||||
8195 | // | ||||
8196 | // We also compute the InReg flag which indicates that the struct contains | ||||
8197 | // aligned 32-bit floats. | ||||
8198 | // | ||||
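// For example, 'struct { int i; float f; double d; }' (128 bits) builds the
// coercion type { i32, float, double } with InReg set: pad() lowers the int
// as i32 padding, while the naturally aligned float and double are exposed
// as first-level elements so they travel in floating point registers.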
8199 | struct CoerceBuilder { | ||||
8200 | llvm::LLVMContext &Context; | ||||
8201 | const llvm::DataLayout &DL; | ||||
8202 | SmallVector<llvm::Type*, 8> Elems; | ||||
8203 | uint64_t Size; | ||||
8204 | bool InReg; | ||||
8205 | |||||
8206 | CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) | ||||
8207 | : Context(c), DL(dl), Size(0), InReg(false) {} | ||||
8208 | |||||
8209 | // Pad Elems with integers until Size is ToSize. | ||||
8210 | void pad(uint64_t ToSize) { | ||||
8211 | assert(ToSize >= Size && "Cannot remove elements"); | ||||
8212 | if (ToSize == Size) | ||||
8213 | return; | ||||
8214 | |||||
8215 | // Finish the current 64-bit word. | ||||
8216 | uint64_t Aligned = llvm::alignTo(Size, 64); | ||||
8217 | if (Aligned > Size && Aligned <= ToSize) { | ||||
8218 | Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); | ||||
8219 | Size = Aligned; | ||||
8220 | } | ||||
8221 | |||||
8222 | // Add whole 64-bit words. | ||||
8223 | while (Size + 64 <= ToSize) { | ||||
8224 | Elems.push_back(llvm::Type::getInt64Ty(Context)); | ||||
8225 | Size += 64; | ||||
8226 | } | ||||
8227 | |||||
8228 | // Final in-word padding. | ||||
8229 | if (Size < ToSize) { | ||||
8230 | Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); | ||||
8231 | Size = ToSize; | ||||
8232 | } | ||||
8233 | } | ||||
8234 | |||||
8235 | // Add a floating point element at Offset. | ||||
8236 | void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { | ||||
8237 | // Unaligned floats are treated as integers. | ||||
8238 | if (Offset % Bits) | ||||
8239 | return; | ||||
8240 | // The InReg flag is only required if there are any floats < 64 bits. | ||||
8241 | if (Bits < 64) | ||||
8242 | InReg = true; | ||||
8243 | pad(Offset); | ||||
8244 | Elems.push_back(Ty); | ||||
8245 | Size = Offset + Bits; | ||||
8246 | } | ||||
8247 | |||||
8248 | // Add a struct type to the coercion type, starting at Offset (in bits). | ||||
8249 | void addStruct(uint64_t Offset, llvm::StructType *StrTy) { | ||||
8250 | const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); | ||||
8251 | for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { | ||||
8252 | llvm::Type *ElemTy = StrTy->getElementType(i); | ||||
8253 | uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); | ||||
8254 | switch (ElemTy->getTypeID()) { | ||||
8255 | case llvm::Type::StructTyID: | ||||
8256 | addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); | ||||
8257 | break; | ||||
8258 | case llvm::Type::FloatTyID: | ||||
8259 | addFloat(ElemOffset, ElemTy, 32); | ||||
8260 | break; | ||||
8261 | case llvm::Type::DoubleTyID: | ||||
8262 | addFloat(ElemOffset, ElemTy, 64); | ||||
8263 | break; | ||||
8264 | case llvm::Type::FP128TyID: | ||||
8265 | addFloat(ElemOffset, ElemTy, 128); | ||||
8266 | break; | ||||
8267 | case llvm::Type::PointerTyID: | ||||
8268 | if (ElemOffset % 64 == 0) { | ||||
8269 | pad(ElemOffset); | ||||
8270 | Elems.push_back(ElemTy); | ||||
8271 | Size += 64; | ||||
8272 | } | ||||
8273 | break; | ||||
8274 | default: | ||||
8275 | break; | ||||
8276 | } | ||||
8277 | } | ||||
8278 | } | ||||
8279 | |||||
8280 | // Check if Ty is a usable substitute for the coercion type. | ||||
8281 | bool isUsableType(llvm::StructType *Ty) const { | ||||
8282 | return llvm::makeArrayRef(Elems) == Ty->elements(); | ||||
8283 | } | ||||
8284 | |||||
8285 | // Get the coercion type as a literal struct type. | ||||
8286 | llvm::Type *getType() const { | ||||
8287 | if (Elems.size() == 1) | ||||
8288 | return Elems.front(); | ||||
8289 | else | ||||
8290 | return llvm::StructType::get(Context, Elems); | ||||
8291 | } | ||||
8292 | }; | ||||
8293 | }; | ||||
8294 | } // end anonymous namespace | ||||
8295 | |||||
8296 | ABIArgInfo | ||||
8297 | SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { | ||||
8298 | if (Ty->isVoidType()) | ||||
8299 | return ABIArgInfo::getIgnore(); | ||||
8300 | |||||
8301 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
8302 | |||||
8303 | // Anything too big to fit in registers is passed with an explicit indirect | ||||
8304 | // pointer / sret pointer. | ||||
8305 | if (Size > SizeLimit) | ||||
8306 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
8307 | |||||
8308 | // Treat an enum type as its underlying type. | ||||
8309 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
8310 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
8311 | |||||
8312 | // Integer types smaller than a register are extended. | ||||
8313 | if (Size < 64 && Ty->isIntegerType()) | ||||
8314 | return ABIArgInfo::getExtend(Ty); | ||||
8315 | |||||
8316 | // Other non-aggregates go in registers. | ||||
8317 | if (!isAggregateTypeForABI(Ty)) | ||||
8318 | return ABIArgInfo::getDirect(); | ||||
8319 | |||||
8320 | // If a C++ object has either a non-trivial copy constructor or a non-trivial | ||||
8321 | // destructor, it is passed with an explicit indirect pointer / sret pointer. | ||||
8322 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) | ||||
8323 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); | ||||
8324 | |||||
8325 | // This is a small aggregate type that should be passed in registers. | ||||
8326 | // Build a coercion type from the LLVM struct type. | ||||
8327 | llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); | ||||
8328 | if (!StrTy) | ||||
8329 | return ABIArgInfo::getDirect(); | ||||
8330 | |||||
8331 | CoerceBuilder CB(getVMContext(), getDataLayout()); | ||||
8332 | CB.addStruct(0, StrTy); | ||||
8333 | CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); | ||||
8334 | |||||
8335 | // Try to use the original type for coercion. | ||||
8336 | llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); | ||||
8337 | |||||
8338 | if (CB.InReg) | ||||
8339 | return ABIArgInfo::getDirectInReg(CoerceTy); | ||||
8340 | else | ||||
8341 | return ABIArgInfo::getDirect(CoerceTy); | ||||
8342 | } | ||||
8343 | |||||
8344 | Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
8345 | QualType Ty) const { | ||||
8346 | ABIArgInfo AI = classifyType(Ty, 16 * 8); | ||||
8347 | llvm::Type *ArgTy = CGT.ConvertType(Ty); | ||||
8348 | if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) | ||||
8349 | AI.setCoerceToType(ArgTy); | ||||
8350 | |||||
8351 | CharUnits SlotSize = CharUnits::fromQuantity(8); | ||||
8352 | |||||
8353 | CGBuilderTy &Builder = CGF.Builder; | ||||
8354 | Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); | ||||
8355 | llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); | ||||
8356 | |||||
8357 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); | ||||
8358 | |||||
8359 | Address ArgAddr = Address::invalid(); | ||||
8360 | CharUnits Stride; | ||||
8361 | switch (AI.getKind()) { | ||||
8362 | case ABIArgInfo::Expand: | ||||
8363 | case ABIArgInfo::CoerceAndExpand: | ||||
8364 | case ABIArgInfo::InAlloca: | ||||
8365 | llvm_unreachable("Unsupported ABI kind for va_arg"); | ||||
8366 | |||||
8367 | case ABIArgInfo::Extend: { | ||||
8368 | Stride = SlotSize; | ||||
8369 | CharUnits Offset = SlotSize - TypeInfo.first; | ||||
8370 | ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); | ||||
8371 | break; | ||||
8372 | } | ||||
8373 | |||||
8374 | case ABIArgInfo::Direct: { | ||||
8375 | auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); | ||||
8376 | Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); | ||||
8377 | ArgAddr = Addr; | ||||
8378 | break; | ||||
8379 | } | ||||
8380 | |||||
8381 | case ABIArgInfo::Indirect: | ||||
8382 | Stride = SlotSize; | ||||
8383 | ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); | ||||
8384 | ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), | ||||
8385 | TypeInfo.second); | ||||
8386 | break; | ||||
8387 | |||||
8388 | case ABIArgInfo::Ignore: | ||||
8389 | return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); | ||||
8390 | } | ||||
8391 | |||||
8392 | // Update VAList. | ||||
8393 | Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); | ||||
8394 | Builder.CreateStore(NextPtr.getPointer(), VAListAddr); | ||||
8395 | |||||
8396 | return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); | ||||
8397 | } | ||||
8398 | |||||
8399 | void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
8400 | FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); | ||||
8401 | for (auto &I : FI.arguments()) | ||||
8402 | I.info = classifyType(I.type, 16 * 8); | ||||
8403 | } | ||||
8404 | |||||
8405 | namespace { | ||||
8406 | class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { | ||||
8407 | public: | ||||
8408 | SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) | ||||
8409 | : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} | ||||
8410 | |||||
8411 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { | ||||
8412 | return 14; | ||||
8413 | } | ||||
8414 | |||||
8415 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
8416 | llvm::Value *Address) const override; | ||||
8417 | }; | ||||
8418 | } // end anonymous namespace | ||||
8419 | |||||
8420 | bool | ||||
8421 | SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, | ||||
8422 | llvm::Value *Address) const { | ||||
8423 | // This is calculated from the LLVM and GCC tables and verified | ||||
8424 | // against gcc output. AFAIK all ABIs use the same encoding. | ||||
8425 | |||||
8426 | CodeGen::CGBuilderTy &Builder = CGF.Builder; | ||||
8427 | |||||
8428 | llvm::IntegerType *i8 = CGF.Int8Ty; | ||||
8429 | llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); | ||||
8430 | llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); | ||||
8431 | |||||
8432 | // 0-31: the 8-byte general-purpose registers | ||||
8433 | AssignToArrayRange(Builder, Address, Eight8, 0, 31); | ||||
8434 | |||||
8435 | // 32-63: f0-31, the 4-byte floating-point registers | ||||
8436 | AssignToArrayRange(Builder, Address, Four8, 32, 63); | ||||
8437 | |||||
8438 | // Y = 64 | ||||
8439 | // PSR = 65 | ||||
8440 | // WIM = 66 | ||||
8441 | // TBR = 67 | ||||
8442 | // PC = 68 | ||||
8443 | // NPC = 69 | ||||
8444 | // FSR = 70 | ||||
8445 | // CSR = 71 | ||||
8446 | AssignToArrayRange(Builder, Address, Eight8, 64, 71); | ||||
8447 | |||||
8448 | // 72-87: d0-15, the 8-byte floating-point registers | ||||
8449 | AssignToArrayRange(Builder, Address, Eight8, 72, 87); | ||||
8450 | |||||
8451 | return false; | ||||
8452 | } | ||||
8453 | |||||
8454 | // ARC ABI implementation. | ||||
8455 | namespace { | ||||
8456 | |||||
8457 | class ARCABIInfo : public DefaultABIInfo { | ||||
8458 | public: | ||||
8459 | using DefaultABIInfo::DefaultABIInfo; | ||||
8460 | |||||
8461 | private: | ||||
8462 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
8463 | QualType Ty) const override; | ||||
8464 | |||||
8465 | void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const { | ||||
8466 | if (!State.FreeRegs) | ||||
8467 | return; | ||||
8468 | if (Info.isIndirect() && Info.getInReg()) | ||||
8469 | State.FreeRegs--; | ||||
8470 | else if (Info.isDirect() && Info.getInReg()) { | ||||
8471 | unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32; | ||||
8472 | if (sz < State.FreeRegs) | ||||
8473 | State.FreeRegs -= sz; | ||||
8474 | else | ||||
8475 | State.FreeRegs = 0; | ||||
8476 | } | ||||
8477 | } | ||||
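// For example, a 'long long' passed direct-in-reg consumes
// (64 + 31) / 32 == 2 of ARC's 8 free registers, while an indirect-in-reg
// argument consumes exactly one register, for the pointer.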
8478 | |||||
8479 | void computeInfo(CGFunctionInfo &FI) const override { | ||||
8480 | CCState State(FI.getCallingConvention()); | ||||
8481 | // ARC uses 8 registers to pass arguments. | ||||
8482 | State.FreeRegs = 8; | ||||
8483 | |||||
8484 | if (!getCXXABI().classifyReturnType(FI)) | ||||
8485 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); | ||||
8486 | updateState(FI.getReturnInfo(), FI.getReturnType(), State); | ||||
8487 | for (auto &I : FI.arguments()) { | ||||
8488 | I.info = classifyArgumentType(I.type, State.FreeRegs); | ||||
8489 | updateState(I.info, I.type, State); | ||||
8490 | } | ||||
8491 | } | ||||
8492 | |||||
8493 | ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const; | ||||
8494 | ABIArgInfo getIndirectByValue(QualType Ty) const; | ||||
8495 | ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const; | ||||
8496 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
8497 | }; | ||||
8498 | |||||
8499 | class ARCTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
8500 | public: | ||||
8501 | ARCTargetCodeGenInfo(CodeGenTypes &CGT) | ||||
8502 | : TargetCodeGenInfo(new ARCABIInfo(CGT)) {} | ||||
8503 | }; | ||||
8504 | |||||
8505 | |||||
8506 | ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const { | ||||
8507 | return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) : | ||||
8508 | getNaturalAlignIndirect(Ty, false); | ||||
8509 | } | ||||
8510 | |||||
8511 | ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const { | ||||
8512 | // Compute the byval alignment. | ||||
8513 | const unsigned MinABIStackAlignInBytes = 4; | ||||
8514 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; | ||||
8515 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, | ||||
8516 | TypeAlign > MinABIStackAlignInBytes); | ||||
8517 | } | ||||
8518 | |||||
8519 | Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
8520 | QualType Ty) const { | ||||
8521 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, | ||||
8522 | getContext().getTypeInfoInChars(Ty), | ||||
8523 | CharUnits::fromQuantity(4), true); | ||||
8524 | } | ||||
8525 | |||||
8526 | ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty, | ||||
8527 | uint8_t FreeRegs) const { | ||||
8528 | // Handle the generic C++ ABI. | ||||
8529 | const RecordType *RT = Ty->getAs<RecordType>(); | ||||
8530 | if (RT) { | ||||
8531 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); | ||||
8532 | if (RAA == CGCXXABI::RAA_Indirect) | ||||
8533 | return getIndirectByRef(Ty, FreeRegs > 0); | ||||
8534 | |||||
8535 | if (RAA == CGCXXABI::RAA_DirectInMemory) | ||||
8536 | return getIndirectByValue(Ty); | ||||
8537 | } | ||||
8538 | |||||
8539 | // Treat an enum type as its underlying type. | ||||
8540 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
8541 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
8542 | |||||
8543 | auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32; | ||||
8544 | |||||
8545 | if (isAggregateTypeForABI(Ty)) { | ||||
8546 | // Structures with flexible arrays are always indirect. | ||||
8547 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) | ||||
8548 | return getIndirectByValue(Ty); | ||||
8549 | |||||
8550 | // Ignore empty structs/unions. | ||||
8551 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
8552 | return ABIArgInfo::getIgnore(); | ||||
8553 | |||||
8554 | llvm::LLVMContext &LLVMContext = getVMContext(); | ||||
8555 | |||||
8556 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); | ||||
8557 | SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); | ||||
8558 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); | ||||
8559 | |||||
8560 | return FreeRegs >= SizeInRegs ? | ||||
8561 | ABIArgInfo::getDirectInReg(Result) : | ||||
8562 | ABIArgInfo::getDirect(Result, 0, nullptr, false); | ||||
8563 | } | ||||
8564 | |||||
8565 | return Ty->isPromotableIntegerType() ? | ||||
8566 | (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) : | ||||
8567 | ABIArgInfo::getExtend(Ty)) : | ||||
8568 | (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() : | ||||
8569 | ABIArgInfo::getDirect()); | ||||
8570 | } | ||||
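// For example, an 8-byte plain struct with at least two registers still
// free is classified as getDirectInReg({ i32, i32 }); once the registers
// are exhausted, the same struct is passed as a plain direct { i32, i32 }.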
8571 | |||||
8572 | ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const { | ||||
8573 | if (RetTy->isAnyComplexType()) | ||||
8574 | return ABIArgInfo::getDirectInReg(); | ||||
8575 | |||||
8576 | // Return values occupying more than 4 registers are returned indirectly. | ||||
8577 | auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32; | ||||
8578 | if (RetSize > 4) | ||||
8579 | return getIndirectByRef(RetTy, /*HasFreeRegs*/ true); | ||||
8580 | |||||
8581 | return DefaultABIInfo::classifyReturnType(RetTy); | ||||
8582 | } | ||||
8583 | |||||
8584 | } // End anonymous namespace. | ||||
8585 | |||||
8586 | //===----------------------------------------------------------------------===// | ||||
8587 | // XCore ABI Implementation | ||||
8588 | //===----------------------------------------------------------------------===// | ||||
8589 | |||||
8590 | namespace { | ||||
8591 | |||||
8592 | /// A SmallStringEnc instance is used to build up the TypeString by passing | ||||
8593 | /// it by reference between functions that append to it. | ||||
8594 | typedef llvm::SmallString<128> SmallStringEnc; | ||||
8595 | |||||
8596 | /// TypeStringCache caches the meta encodings of Types. | ||||
8597 | /// | ||||
8598 | /// The reason for caching TypeStrings is two fold: | ||||
8599 | /// 1. To cache a type's encoding for later uses; | ||||
8600 | /// 2. As a means to break recursive member type inclusion. | ||||
8601 | /// | ||||
8602 | /// A cache Entry can have a Status of: | ||||
8603 | /// NonRecursive: The type encoding is not recursive; | ||||
8604 | /// Recursive: The type encoding is recursive; | ||||
8605 | /// Incomplete: An incomplete TypeString; | ||||
8606 | /// IncompleteUsed: An incomplete TypeString that has been used in a | ||||
8607 | /// Recursive type encoding. | ||||
8608 | /// | ||||
8609 | /// A NonRecursive entry will have all of its sub-members expanded as fully | ||||
8610 | /// as possible. Whilst it may contain types which are recursive, the type | ||||
8611 | /// itself is not recursive and thus its encoding may be safely used whenever | ||||
8612 | /// the type is encountered. | ||||
8613 | /// | ||||
8614 | /// A Recursive entry will have all of its sub-members expanded as fully as | ||||
8615 | /// possible. The type itself is recursive and it may contain other types which | ||||
8616 | /// are recursive. The Recursive encoding must not be used during the expansion | ||||
8617 | /// of a recursive type's recursive branch. For simplicity the code uses | ||||
8618 | /// IncompleteCount to reject all usage of Recursive encodings for member types. | ||||
8619 | /// | ||||
8620 | /// An Incomplete entry is always a RecordType and only encodes its | ||||
8621 | /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and | ||||
8622 | /// are placed into the cache during type expansion as a means to identify and | ||||
8623 | /// handle recursive inclusion of types as sub-members. If there is recursion | ||||
8624 | /// the entry becomes IncompleteUsed. | ||||
8625 | /// | ||||
8626 | /// During the expansion of a RecordType's members: | ||||
8627 | /// | ||||
8628 | /// If the cache contains a NonRecursive encoding for the member type, the | ||||
8629 | /// cached encoding is used; | ||||
8630 | /// | ||||
8631 | /// If the cache contains a Recursive encoding for the member type, the | ||||
8632 | /// cached encoding is 'Swapped' out, as it may be incorrect, and... | ||||
8633 | /// | ||||
8634 | /// If the member is a RecordType, an Incomplete encoding is placed into the | ||||
8635 | /// cache to break potential recursive inclusion of itself as a sub-member; | ||||
8636 | /// | ||||
8637 | /// Once a member RecordType has been expanded, its temporary incomplete | ||||
8638 | /// entry is removed from the cache. If a Recursive encoding was swapped out | ||||
8639 | /// it is swapped back in; | ||||
8640 | /// | ||||
8641 | /// If an incomplete entry is used to expand a sub-member, the incomplete | ||||
8642 | /// entry is marked as IncompleteUsed. The cache keeps count of how many | ||||
8643 | /// IncompleteUsed entries it currently contains in IncompleteUsedCount; | ||||
8644 | /// | ||||
8645 | /// If a member's encoding is found to be a NonRecursive or Recursive viz: | ||||
8646 | /// IncompleteUsedCount==0, the member's encoding is added to the cache. | ||||
8647 | /// Else the member is part of a recursive type and thus the recursion has | ||||
8648 | /// been exited too soon for the encoding to be correct for the member. | ||||
8649 | /// | ||||
8650 | class TypeStringCache { | ||||
8651 | enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed}; | ||||
8652 | struct Entry { | ||||
8653 | std::string Str; // The encoded TypeString for the type. | ||||
8654 | enum Status State; // Information about the encoding in 'Str'. | ||||
8655 | std::string Swapped; // A temporary place holder for a Recursive encoding | ||||
8656 | // during the expansion of RecordType's members. | ||||
8657 | }; | ||||
8658 | std::map<const IdentifierInfo *, struct Entry> Map; | ||||
8659 | unsigned IncompleteCount; // Number of Incomplete entries in the Map. | ||||
8660 | unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map. | ||||
8661 | public: | ||||
8662 | TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {} | ||||
8663 | void addIncomplete(const IdentifierInfo *ID, std::string StubEnc); | ||||
8664 | bool removeIncomplete(const IdentifierInfo *ID); | ||||
8665 | void addIfComplete(const IdentifierInfo *ID, StringRef Str, | ||||
8666 | bool IsRecursive); | ||||
8667 | StringRef lookupStr(const IdentifierInfo *ID); | ||||
8668 | }; | ||||
8669 | |||||
8670 | /// TypeString encodings for enum & union fields must be ordered. | ||||
8671 | /// FieldEncoding is a helper for this ordering process. | ||||
8672 | class FieldEncoding { | ||||
8673 | bool HasName; | ||||
8674 | std::string Enc; | ||||
8675 | public: | ||||
8676 | FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} | ||||
8677 | StringRef str() { return Enc; } | ||||
8678 | bool operator<(const FieldEncoding &rhs) const { | ||||
8679 | if (HasName != rhs.HasName) return HasName; | ||||
8680 | return Enc < rhs.Enc; | ||||
8681 | } | ||||
8682 | }; | ||||
8683 | |||||
8684 | class XCoreABIInfo : public DefaultABIInfo { | ||||
8685 | public: | ||||
8686 | XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} | ||||
8687 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
8688 | QualType Ty) const override; | ||||
8689 | }; | ||||
8690 | |||||
8691 | class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
8692 | mutable TypeStringCache TSC; | ||||
8693 | public: | ||||
8694 | XCoreTargetCodeGenInfo(CodeGenTypes &CGT) | ||||
8695 | :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {} | ||||
8696 | void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, | ||||
8697 | CodeGen::CodeGenModule &M) const override; | ||||
8698 | }; | ||||
8699 | |||||
8700 | } // End anonymous namespace. | ||||
8701 | |||||
8702 | // TODO: this implementation is likely now redundant with the default | ||||
8703 | // EmitVAArg. | ||||
8704 | Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
8705 | QualType Ty) const { | ||||
8706 | CGBuilderTy &Builder = CGF.Builder; | ||||
8707 | |||||
8708 | // Get the VAList. | ||||
8709 | CharUnits SlotSize = CharUnits::fromQuantity(4); | ||||
8710 | Address AP(Builder.CreateLoad(VAListAddr), SlotSize); | ||||
8711 | |||||
8712 | // Handle the argument. | ||||
8713 | ABIArgInfo AI = classifyArgumentType(Ty); | ||||
8714 | CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty); | ||||
8715 | llvm::Type *ArgTy = CGT.ConvertType(Ty); | ||||
8716 | if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) | ||||
8717 | AI.setCoerceToType(ArgTy); | ||||
8718 | llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); | ||||
8719 | |||||
8720 | Address Val = Address::invalid(); | ||||
8721 | CharUnits ArgSize = CharUnits::Zero(); | ||||
8722 | switch (AI.getKind()) { | ||||
8723 | case ABIArgInfo::Expand: | ||||
8724 | case ABIArgInfo::CoerceAndExpand: | ||||
8725 | case ABIArgInfo::InAlloca: | ||||
8726 | llvm_unreachable("Unsupported ABI kind for va_arg"); | ||||
8727 | case ABIArgInfo::Ignore: | ||||
8728 | Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign); | ||||
8729 | ArgSize = CharUnits::Zero(); | ||||
8730 | break; | ||||
8731 | case ABIArgInfo::Extend: | ||||
8732 | case ABIArgInfo::Direct: | ||||
8733 | Val = Builder.CreateBitCast(AP, ArgPtrTy); | ||||
8734 | ArgSize = CharUnits::fromQuantity( | ||||
8735 | getDataLayout().getTypeAllocSize(AI.getCoerceToType())); | ||||
8736 | ArgSize = ArgSize.alignTo(SlotSize); | ||||
8737 | break; | ||||
8738 | case ABIArgInfo::Indirect: | ||||
8739 | Val = Builder.CreateElementBitCast(AP, ArgPtrTy); | ||||
8740 | Val = Address(Builder.CreateLoad(Val), TypeAlign); | ||||
8741 | ArgSize = SlotSize; | ||||
8742 | break; | ||||
8743 | } | ||||
8744 | |||||
8745 | // Increment the VAList. | ||||
8746 | if (!ArgSize.isZero()) { | ||||
8747 | Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize); | ||||
8748 | Builder.CreateStore(APN.getPointer(), VAListAddr); | ||||
8749 | } | ||||
8750 | |||||
8751 | return Val; | ||||
8752 | } | ||||
8753 | |||||
8754 | /// During the expansion of a RecordType, an incomplete TypeString is placed | ||||
8755 | /// into the cache as a means to identify and break recursion. | ||||
8756 | /// If there is a Recursive encoding in the cache, it is swapped out and will | ||||
8757 | /// be reinserted by removeIncomplete(). | ||||
8758 | /// All other types of encoding should have been used rather than arriving here. | ||||
8759 | void TypeStringCache::addIncomplete(const IdentifierInfo *ID, | ||||
8760 | std::string StubEnc) { | ||||
8761 | if (!ID) | ||||
8762 | return; | ||||
8763 | Entry &E = Map[ID]; | ||||
8764 | assert((E.Str.empty() || E.State == Recursive) && | ||||
8765 | "Incorrect use of addIncomplete"); | ||||
8766 | assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()"); | ||||
8767 | E.Swapped.swap(E.Str); // swap out the Recursive | ||||
8768 | E.Str.swap(StubEnc); | ||||
8769 | E.State = Incomplete; | ||||
8770 | ++IncompleteCount; | ||||
8771 | } | ||||
8772 | |||||
8773 | /// Once the RecordType has been expanded, the temporary incomplete TypeString | ||||
8774 | /// must be removed from the cache. | ||||
8775 | /// If a Recursive was swapped out by addIncomplete(), it will be replaced. | ||||
8776 | /// Returns true if the RecordType was defined recursively. | ||||
8777 | bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) { | ||||
8778 | if (!ID) | ||||
8779 | return false; | ||||
8780 | auto I = Map.find(ID); | ||||
8781 | assert(I != Map.end() && "Entry not present"); | ||||
8782 | Entry &E = I->second; | ||||
8783 | assert((E.State == Incomplete || | ||||
8784 | E.State == IncompleteUsed) && | ||||
8785 | "Entry must be an incomplete type"); | ||||
8786 | bool IsRecursive = false; | ||||
8787 | if (E.State == IncompleteUsed) { | ||||
8788 | // We made use of our Incomplete encoding, thus we are recursive. | ||||
8789 | IsRecursive = true; | ||||
8790 | --IncompleteUsedCount; | ||||
8791 | } | ||||
8792 | if (E.Swapped.empty()) | ||||
8793 | Map.erase(I); | ||||
8794 | else { | ||||
8795 | // Swap the Recursive back. | ||||
8796 | E.Swapped.swap(E.Str); | ||||
8797 | E.Swapped.clear(); | ||||
8798 | E.State = Recursive; | ||||
8799 | } | ||||
8800 | --IncompleteCount; | ||||
8801 | return IsRecursive; | ||||
8802 | } | ||||
8803 | |||||
8804 | /// Add the encoded TypeString to the cache only if it is NonRecursive or | ||||
8805 | /// Recursive (viz: all sub-members were expanded as fully as possible). | ||||
8806 | void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, | ||||
8807 | bool IsRecursive) { | ||||
8808 | if (!ID || IncompleteUsedCount) | ||||
8809 | return; // No key or it is an incomplete sub-type, so don't add. | ||||
8810 | Entry &E = Map[ID]; | ||||
8811 | if (IsRecursive && !E.Str.empty()) { | ||||
8812 | assert(E.State == Recursive && E.Str.size() == Str.size() && | ||||
8813 | "This is not the same Recursive entry"); | ||||
8814 | // The parent container was not recursive after all, so we could have | ||||
8815 | // used this Recursive sub-member entry, but we assumed the worst when | ||||
8816 | // we started, viz: IncompleteCount != 0. | ||||
8817 | return; | ||||
8818 | } | ||||
8819 | assert(E.Str.empty() && "Entry already present"); | ||||
8820 | E.Str = Str.str(); | ||||
8821 | E.State = IsRecursive? Recursive : NonRecursive; | ||||
8822 | } | ||||
8823 | |||||
8824 | /// Return a cached TypeString encoding for the ID. If there isn't one, or we | ||||
8825 | /// are recursively expanding a type (IncompleteCount != 0) and the cached | ||||
8826 | /// encoding is Recursive, return an empty StringRef. | ||||
8827 | StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) { | ||||
8828 | if (!ID) | ||||
8829 | return StringRef(); // We have no key. | ||||
8830 | auto I = Map.find(ID); | ||||
8831 | if (I == Map.end()) | ||||
8832 | return StringRef(); // We have no encoding. | ||||
8833 | Entry &E = I->second; | ||||
8834 | if (E.State == Recursive && IncompleteCount) | ||||
8835 | return StringRef(); // We don't use Recursive encodings for member types. | ||||
8836 | |||||
8837 | if (E.State == Incomplete) { | ||||
8838 | // The incomplete type is being used to break out of recursion. | ||||
8839 | E.State = IncompleteUsed; | ||||
8840 | ++IncompleteUsedCount; | ||||
8841 | } | ||||
8842 | return E.Str; | ||||
8843 | } | ||||
8844 | |||||
8845 | /// The XCore ABI includes a type information section that communicates symbol | ||||
8846 | /// type information to the linker. The linker uses this information to verify | ||||
8847 | /// safety/correctness of things such as array bounds and pointers. | ||||
8848 | /// The ABI only requires C (and XC) language modules to emit TypeStrings. | ||||
8849 | /// This type information (TypeString) is emitted into meta data for all global | ||||
8850 | /// symbols: definitions, declarations, functions & variables. | ||||
8851 | /// | ||||
8852 | /// The TypeString carries type, qualifier, name, size & value details. | ||||
8853 | /// Please see 'Tools Development Guide' section 2.16.2 for format details: | ||||
8854 | /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf | ||||
8855 | /// The output is tested by test/CodeGen/xcore-stringtype.c. | ||||
8856 | /// | ||||
8857 | static bool getTypeString(SmallStringEnc &Enc, const Decl *D, | ||||
8858 | CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); | ||||
8859 | |||||
8860 | /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. | ||||
8861 | void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, | ||||
8862 | CodeGen::CodeGenModule &CGM) const { | ||||
8863 | SmallStringEnc Enc; | ||||
8864 | if (getTypeString(Enc, D, CGM, TSC)) { | ||||
8865 | llvm::LLVMContext &Ctx = CGM.getModule().getContext(); | ||||
8866 | llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), | ||||
8867 | llvm::MDString::get(Ctx, Enc.str())}; | ||||
8868 | llvm::NamedMDNode *MD = | ||||
8869 | CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); | ||||
8870 | MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); | ||||
8871 | } | ||||
8872 | } | ||||
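// For example (sketch for a hypothetical C function 'int f(void)'), the
// emitted module metadata would look like:
//
//   !xcore.typestrings = !{!0}
//   !0 = !{i32 ()* @f, !"f{si}(0)"}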
8873 | |||||
8874 | //===----------------------------------------------------------------------===// | ||||
8875 | // SPIR ABI Implementation | ||||
8876 | //===----------------------------------------------------------------------===// | ||||
8877 | |||||
8878 | namespace { | ||||
8879 | class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
8880 | public: | ||||
8881 | SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) | ||||
8882 | : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} | ||||
8883 | unsigned getOpenCLKernelCallingConv() const override; | ||||
8884 | }; | ||||
8885 | |||||
8886 | } // End anonymous namespace. | ||||
8887 | |||||
8888 | namespace clang { | ||||
8889 | namespace CodeGen { | ||||
8890 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { | ||||
8891 | DefaultABIInfo SPIRABI(CGM.getTypes()); | ||||
8892 | SPIRABI.computeInfo(FI); | ||||
8893 | } | ||||
8894 | } | ||||
8895 | } | ||||
8896 | |||||
8897 | unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { | ||||
8898 | return llvm::CallingConv::SPIR_KERNEL; | ||||
8899 | } | ||||
8900 | |||||
8901 | static bool appendType(SmallStringEnc &Enc, QualType QType, | ||||
8902 | const CodeGen::CodeGenModule &CGM, | ||||
8903 | TypeStringCache &TSC); | ||||
8904 | |||||
8905 | /// Helper function for appendRecordType(). | ||||
8906 | /// Builds a SmallVector containing the encoded field types in declaration | ||||
8907 | /// order. | ||||
8908 | static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, | ||||
8909 | const RecordDecl *RD, | ||||
8910 | const CodeGen::CodeGenModule &CGM, | ||||
8911 | TypeStringCache &TSC) { | ||||
8912 | for (const auto *Field : RD->fields()) { | ||||
8913 | SmallStringEnc Enc; | ||||
8914 | Enc += "m("; | ||||
8915 | Enc += Field->getName(); | ||||
8916 | Enc += "){"; | ||||
8917 | if (Field->isBitField()) { | ||||
8918 | Enc += "b("; | ||||
8919 | llvm::raw_svector_ostream OS(Enc); | ||||
8920 | OS << Field->getBitWidthValue(CGM.getContext()); | ||||
8921 | Enc += ':'; | ||||
8922 | } | ||||
8923 | if (!appendType(Enc, Field->getType(), CGM, TSC)) | ||||
8924 | return false; | ||||
8925 | if (Field->isBitField()) | ||||
8926 | Enc += ')'; | ||||
8927 | Enc += '}'; | ||||
8928 | FE.emplace_back(!Field->getName().empty(), Enc); | ||||
8929 | } | ||||
8930 | return true; | ||||
8931 | } | ||||
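// For example, a field 'int b : 3;' is encoded by the loop above as
// "m(b){b(3:si)}".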
8932 | |||||
8933 | /// Appends structure and union types to Enc and adds encoding to cache. | ||||
8934 | /// Recursively calls appendType (via extractFieldType) for each field. | ||||
8935 | /// Union types have their fields ordered according to the ABI. | ||||
8936 | static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, | ||||
8937 | const CodeGen::CodeGenModule &CGM, | ||||
8938 | TypeStringCache &TSC, const IdentifierInfo *ID) { | ||||
8939 | // Append the cached TypeString if we have one. | ||||
8940 | StringRef TypeString = TSC.lookupStr(ID); | ||||
8941 | if (!TypeString.empty()) { | ||||
8942 | Enc += TypeString; | ||||
8943 | return true; | ||||
8944 | } | ||||
8945 | |||||
8946 | // Start to emit an incomplete TypeString. | ||||
8947 | size_t Start = Enc.size(); | ||||
8948 | Enc += (RT->isUnionType()? 'u' : 's'); | ||||
8949 | Enc += '('; | ||||
8950 | if (ID) | ||||
8951 | Enc += ID->getName(); | ||||
8952 | Enc += "){"; | ||||
8953 | |||||
8954 | // We collect all encoded fields and order as necessary. | ||||
8955 | bool IsRecursive = false; | ||||
8956 | const RecordDecl *RD = RT->getDecl()->getDefinition(); | ||||
8957 | if (RD && !RD->field_empty()) { | ||||
8958 | // An incomplete TypeString stub is placed in the cache for this RecordType | ||||
8959 | // so that recursive calls to this RecordType will use it whilst building a | ||||
8960 | // complete TypeString for this RecordType. | ||||
8961 | SmallVector<FieldEncoding, 16> FE; | ||||
8962 | std::string StubEnc(Enc.substr(Start).str()); | ||||
8963 | StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. | ||||
8964 | TSC.addIncomplete(ID, std::move(StubEnc)); | ||||
8965 | if (!extractFieldType(FE, RD, CGM, TSC)) { | ||||
8966 | (void) TSC.removeIncomplete(ID); | ||||
8967 | return false; | ||||
8968 | } | ||||
8969 | IsRecursive = TSC.removeIncomplete(ID); | ||||
8970 | // The ABI requires unions to be sorted but not structures. | ||||
8971 | // See FieldEncoding::operator< for sort algorithm. | ||||
8972 | if (RT->isUnionType()) | ||||
8973 | llvm::sort(FE); | ||||
8974 | // We can now complete the TypeString. | ||||
8975 | unsigned E = FE.size(); | ||||
8976 | for (unsigned I = 0; I != E; ++I) { | ||||
8977 | if (I) | ||||
8978 | Enc += ','; | ||||
8979 | Enc += FE[I].str(); | ||||
8980 | } | ||||
8981 | } | ||||
8982 | Enc += '}'; | ||||
8983 | TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); | ||||
8984 | return true; | ||||
8985 | } | ||||
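// For example, a hypothetical 'struct S { int i; char c; };' encodes as
// "s(S){m(i){si},m(c){uc}}" (declaration order; plain char is unsigned on
// XCore), whereas a union's fields would first be sorted via
// FieldEncoding::operator<.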
8986 | |||||
8987 | /// Appends enum types to Enc and adds the encoding to the cache. | ||||
8988 | static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, | ||||
8989 | TypeStringCache &TSC, | ||||
8990 | const IdentifierInfo *ID) { | ||||
8991 | // Append the cached TypeString if we have one. | ||||
8992 | StringRef TypeString = TSC.lookupStr(ID); | ||||
8993 | if (!TypeString.empty()) { | ||||
8994 | Enc += TypeString; | ||||
8995 | return true; | ||||
8996 | } | ||||
8997 | |||||
8998 | size_t Start = Enc.size(); | ||||
8999 | Enc += "e("; | ||||
9000 | if (ID) | ||||
9001 | Enc += ID->getName(); | ||||
9002 | Enc += "){"; | ||||
9003 | |||||
9004 | // We collect all encoded enumerations and order them alphanumerically. | ||||
9005 | if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { | ||||
9006 | SmallVector<FieldEncoding, 16> FE; | ||||
9007 | for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; | ||||
9008 | ++I) { | ||||
9009 | SmallStringEnc EnumEnc; | ||||
9010 | EnumEnc += "m("; | ||||
9011 | EnumEnc += I->getName(); | ||||
9012 | EnumEnc += "){"; | ||||
9013 | I->getInitVal().toString(EnumEnc); | ||||
9014 | EnumEnc += '}'; | ||||
9015 | FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); | ||||
9016 | } | ||||
9017 | llvm::sort(FE); | ||||
9018 | unsigned E = FE.size(); | ||||
9019 | for (unsigned I = 0; I != E; ++I) { | ||||
9020 | if (I) | ||||
9021 | Enc += ','; | ||||
9022 | Enc += FE[I].str(); | ||||
9023 | } | ||||
9024 | } | ||||
9025 | Enc += '}'; | ||||
9026 | TSC.addIfComplete(ID, Enc.substr(Start), false); | ||||
9027 | return true; | ||||
9028 | } | ||||
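// For example, a hypothetical 'enum E { A = 1, B = 0 };' encodes as
// "e(E){m(A){1},m(B){0}}": enumerators are ordered by name, not by value.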
9029 | |||||
9030 | /// Appends type's qualifier to Enc. | ||||
9031 | /// This is done prior to appending the type's encoding. | ||||
9032 | static void appendQualifier(SmallStringEnc &Enc, QualType QT) { | ||||
9033 | // Qualifiers are emitted in alphabetical order. | ||||
9034 | static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; | ||||
9035 | int Lookup = 0; | ||||
9036 | if (QT.isConstQualified()) | ||||
9037 | Lookup += 1<<0; | ||||
9038 | if (QT.isRestrictQualified()) | ||||
9039 | Lookup += 1<<1; | ||||
9040 | if (QT.isVolatileQualified()) | ||||
9041 | Lookup += 1<<2; | ||||
9042 | Enc += Table[Lookup]; | ||||
9043 | } | ||||
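// For example, 'const volatile int' yields Lookup == 5, so the prefix
// "cv:" is appended and the full encoding becomes "cv:si".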
9044 | |||||
9045 | /// Appends built-in types to Enc. | ||||
9046 | static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { | ||||
9047 | const char *EncType; | ||||
9048 | switch (BT->getKind()) { | ||||
9049 | case BuiltinType::Void: | ||||
9050 | EncType = "0"; | ||||
9051 | break; | ||||
9052 | case BuiltinType::Bool: | ||||
9053 | EncType = "b"; | ||||
9054 | break; | ||||
9055 | case BuiltinType::Char_U: | ||||
9056 | EncType = "uc"; | ||||
9057 | break; | ||||
9058 | case BuiltinType::UChar: | ||||
9059 | EncType = "uc"; | ||||
9060 | break; | ||||
9061 | case BuiltinType::SChar: | ||||
9062 | EncType = "sc"; | ||||
9063 | break; | ||||
9064 | case BuiltinType::UShort: | ||||
9065 | EncType = "us"; | ||||
9066 | break; | ||||
9067 | case BuiltinType::Short: | ||||
9068 | EncType = "ss"; | ||||
9069 | break; | ||||
9070 | case BuiltinType::UInt: | ||||
9071 | EncType = "ui"; | ||||
9072 | break; | ||||
9073 | case BuiltinType::Int: | ||||
9074 | EncType = "si"; | ||||
9075 | break; | ||||
9076 | case BuiltinType::ULong: | ||||
9077 | EncType = "ul"; | ||||
9078 | break; | ||||
9079 | case BuiltinType::Long: | ||||
9080 | EncType = "sl"; | ||||
9081 | break; | ||||
9082 | case BuiltinType::ULongLong: | ||||
9083 | EncType = "ull"; | ||||
9084 | break; | ||||
9085 | case BuiltinType::LongLong: | ||||
9086 | EncType = "sll"; | ||||
9087 | break; | ||||
9088 | case BuiltinType::Float: | ||||
9089 | EncType = "ft"; | ||||
9090 | break; | ||||
9091 | case BuiltinType::Double: | ||||
9092 | EncType = "d"; | ||||
9093 | break; | ||||
9094 | case BuiltinType::LongDouble: | ||||
9095 | EncType = "ld"; | ||||
9096 | break; | ||||
9097 | default: | ||||
9098 | return false; | ||||
9099 | } | ||||
9100 | Enc += EncType; | ||||
9101 | return true; | ||||
9102 | } | ||||
9103 | |||||
9104 | /// Appends a pointer encoding to Enc before calling appendType for the pointee. | ||||
9105 | static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, | ||||
9106 | const CodeGen::CodeGenModule &CGM, | ||||
9107 | TypeStringCache &TSC) { | ||||
9108 | Enc += "p("; | ||||
9109 | if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) | ||||
9110 | return false; | ||||
9111 | Enc += ')'; | ||||
9112 | return true; | ||||
9113 | } | ||||
9114 | |||||
9115 | /// Appends array encoding to Enc before calling appendType for the element. | ||||
9116 | static bool appendArrayType(SmallStringEnc &Enc, QualType QT, | ||||
9117 | const ArrayType *AT, | ||||
9118 | const CodeGen::CodeGenModule &CGM, | ||||
9119 | TypeStringCache &TSC, StringRef NoSizeEnc) { | ||||
9120 | if (AT->getSizeModifier() != ArrayType::Normal) | ||||
9121 | return false; | ||||
9122 | Enc += "a("; | ||||
9123 | if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) | ||||
9124 | CAT->getSize().toStringUnsigned(Enc); | ||||
9125 | else | ||||
9126 | Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". | ||||
9127 | Enc += ':'; | ||||
9128 | // The Qualifiers should be attached to the type rather than the array. | ||||
9129 | appendQualifier(Enc, QT); | ||||
9130 | if (!appendType(Enc, AT->getElementType(), CGM, TSC)) | ||||
9131 | return false; | ||||
9132 | Enc += ')'; | ||||
9133 | return true; | ||||
9134 | } | ||||
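// For example, 'int a[10]' encodes as "a(10:si)", while a global array of
// unknown size encodes as "a(*:si)".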
9135 | |||||
9136 | /// Appends a function encoding to Enc, calling appendType for the return type | ||||
9137 | /// and the arguments. | ||||
9138 | static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, | ||||
9139 | const CodeGen::CodeGenModule &CGM, | ||||
9140 | TypeStringCache &TSC) { | ||||
9141 | Enc += "f{"; | ||||
9142 | if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) | ||||
9143 | return false; | ||||
9144 | Enc += "}("; | ||||
9145 | if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { | ||||
9146 | // N.B. we are only interested in the adjusted param types. | ||||
9147 | auto I = FPT->param_type_begin(); | ||||
9148 | auto E = FPT->param_type_end(); | ||||
9149 | if (I != E) { | ||||
9150 | do { | ||||
9151 | if (!appendType(Enc, *I, CGM, TSC)) | ||||
9152 | return false; | ||||
9153 | ++I; | ||||
9154 | if (I != E) | ||||
9155 | Enc += ','; | ||||
9156 | } while (I != E); | ||||
9157 | if (FPT->isVariadic()) | ||||
9158 | Enc += ",va"; | ||||
9159 | } else { | ||||
9160 | if (FPT->isVariadic()) | ||||
9161 | Enc += "va"; | ||||
9162 | else | ||||
9163 | Enc += '0'; | ||||
9164 | } | ||||
9165 | } | ||||
9166 | Enc += ')'; | ||||
9167 | return true; | ||||
9168 | } | ||||
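// For example, a hypothetical 'int f(int, char, ...)' encodes as
// "f{si}(si,uc,va)" and 'void g(void)' encodes as "f{0}(0)".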
9169 | |||||
9170 | /// Handles the type's qualifier before dispatching a call to handle specific | ||||
9171 | /// type encodings. | ||||
9172 | static bool appendType(SmallStringEnc &Enc, QualType QType, | ||||
9173 | const CodeGen::CodeGenModule &CGM, | ||||
9174 | TypeStringCache &TSC) { | ||||
9175 | |||||
9176 | QualType QT = QType.getCanonicalType(); | ||||
9177 | |||||
9178 | if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) | ||||
9179 | // The Qualifiers should be attached to the type rather than the array. | ||||
9180 | // Thus we don't call appendQualifier() here. | ||||
9181 | return appendArrayType(Enc, QT, AT, CGM, TSC, ""); | ||||
9182 | |||||
9183 | appendQualifier(Enc, QT); | ||||
9184 | |||||
9185 | if (const BuiltinType *BT = QT->getAs<BuiltinType>()) | ||||
9186 | return appendBuiltinType(Enc, BT); | ||||
9187 | |||||
9188 | if (const PointerType *PT = QT->getAs<PointerType>()) | ||||
9189 | return appendPointerType(Enc, PT, CGM, TSC); | ||||
9190 | |||||
9191 | if (const EnumType *ET = QT->getAs<EnumType>()) | ||||
9192 | return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); | ||||
9193 | |||||
9194 | if (const RecordType *RT = QT->getAsStructureType()) | ||||
9195 | return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); | ||||
9196 | |||||
9197 | if (const RecordType *RT = QT->getAsUnionType()) | ||||
9198 | return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); | ||||
9199 | |||||
9200 | if (const FunctionType *FT = QT->getAs<FunctionType>()) | ||||
9201 | return appendFunctionType(Enc, FT, CGM, TSC); | ||||
9202 | |||||
9203 | return false; | ||||
9204 | } | ||||
9205 | |||||
9206 | static bool getTypeString(SmallStringEnc &Enc, const Decl *D, | ||||
9207 | CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { | ||||
9208 | if (!D) | ||||
9209 | return false; | ||||
9210 | |||||
9211 | if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { | ||||
9212 | if (FD->getLanguageLinkage() != CLanguageLinkage) | ||||
9213 | return false; | ||||
9214 | return appendType(Enc, FD->getType(), CGM, TSC); | ||||
9215 | } | ||||
9216 | |||||
9217 | if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { | ||||
9218 | if (VD->getLanguageLinkage() != CLanguageLinkage) | ||||
9219 | return false; | ||||
9220 | QualType QT = VD->getType().getCanonicalType(); | ||||
9221 | if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { | ||||
9222 | // Global ArrayTypes are given a size of '*' if the size is unknown. | ||||
9223 | // The Qualifiers should be attached to the type rather than the array. | ||||
9224 | // Thus we don't call appendQualifier() here. | ||||
9225 | return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); | ||||
9226 | } | ||||
9227 | return appendType(Enc, QT, CGM, TSC); | ||||
9228 | } | ||||
9229 | return false; | ||||
9230 | } | ||||
9231 | |||||
9232 | //===----------------------------------------------------------------------===// | ||||
9233 | // RISCV ABI Implementation | ||||
9234 | //===----------------------------------------------------------------------===// | ||||
9235 | |||||
9236 | namespace { | ||||
9237 | class RISCVABIInfo : public DefaultABIInfo { | ||||
9238 | private: | ||||
9239 | // Size of the integer ('x') registers in bits. | ||||
9240 | unsigned XLen; | ||||
9241 | // Size of the floating point ('f') registers in bits. Note that the target | ||||
9242 | // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target | ||||
9243 | // with soft float ABI has FLen==0). | ||||
9244 | unsigned FLen; | ||||
9245 | static const int NumArgGPRs = 8; | ||||
9246 | static const int NumArgFPRs = 8; | ||||
9247 | bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, | ||||
9248 | llvm::Type *&Field1Ty, | ||||
9249 | CharUnits &Field1Off, | ||||
9250 | llvm::Type *&Field2Ty, | ||||
9251 | CharUnits &Field2Off) const; | ||||
9252 | |||||
9253 | public: | ||||
9254 | RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen) | ||||
9255 | : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {} | ||||
9256 | |||||
9257 | // DefaultABIInfo's classifyReturnType and classifyArgumentType are | ||||
9258 | // non-virtual, but computeInfo is virtual, so we override it. | ||||
9259 | void computeInfo(CGFunctionInfo &FI) const override; | ||||
9260 | |||||
9261 | ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft, | ||||
9262 | int &ArgFPRsLeft) const; | ||||
9263 | ABIArgInfo classifyReturnType(QualType RetTy) const; | ||||
9264 | |||||
9265 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
9266 | QualType Ty) const override; | ||||
9267 | |||||
9268 | ABIArgInfo extendType(QualType Ty) const; | ||||
9269 | |||||
9270 | bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, | ||||
9271 | CharUnits &Field1Off, llvm::Type *&Field2Ty, | ||||
9272 | CharUnits &Field2Off, int &NeededArgGPRs, | ||||
9273 | int &NeededArgFPRs) const; | ||||
9274 | ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty, | ||||
9275 | CharUnits Field1Off, | ||||
9276 | llvm::Type *Field2Ty, | ||||
9277 | CharUnits Field2Off) const; | ||||
9278 | }; | ||||
9279 | } // end anonymous namespace | ||||
9280 | |||||
9281 | void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const { | ||||
9282 | QualType RetTy = FI.getReturnType(); | ||||
9283 | if (!getCXXABI().classifyReturnType(FI)) | ||||
9284 | FI.getReturnInfo() = classifyReturnType(RetTy); | ||||
9285 | |||||
9286 | // IsRetIndirect is true if classifyArgumentType indicated the value should | ||||
9287 | // be passed indirectly, or if the type size is greater than 2*XLen. e.g. | ||||
9288 | // fp128 is passed direct in LLVM IR, relying on the backend lowering code to | ||||
9289 | // rewrite the argument list and pass indirectly on RV32. | ||||
9290 | bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect || | ||||
9291 | getContext().getTypeSize(RetTy) > (2 * XLen); | ||||
9292 | |||||
9293 | // We must track the number of GPRs used in order to conform to the RISC-V | ||||
9294 | // ABI, as integer scalars passed in registers should have signext/zeroext | ||||
9295 | // when promoted, but are anyext if passed on the stack. As GPR usage is | ||||
9296 | // different for variadic arguments, we must also track whether we are | ||||
9297 | // examining a vararg or not. | ||||
9298 | int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs; | ||||
9299 | int ArgFPRsLeft = FLen ? NumArgFPRs : 0; | ||||
9300 | int NumFixedArgs = FI.getNumRequiredArgs(); | ||||
9301 | |||||
9302 | int ArgNum = 0; | ||||
9303 | for (auto &ArgInfo : FI.arguments()) { | ||||
9304 | bool IsFixed = ArgNum < NumFixedArgs; | ||||
9305 | ArgInfo.info = | ||||
9306 | classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft); | ||||
9307 | ArgNum++; | ||||
9308 | } | ||||
9309 | } | ||||
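// Editorial worked example (not in the original source), assuming RV64
// with the lp64d ABI (XLen == 64, FLen == 64) and a direct return:
//
//   void f(int x1, ..., int x9);  // nine integer arguments (schematic)
//
// The first eight consume the eight argument GPRs and are emitted with
// signext/zeroext as appropriate; the ninth finds ArgGPRsLeft == 0 and is
// passed on the stack as anyext. If the return were indirect instead, the
// sret pointer would consume one GPR up front (NumArgGPRs - 1 above).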
9310 | |||||
9311 | // Returns true if the struct is a potential candidate for the floating point | ||||
9312 | // calling convention. If this function returns true, the caller is | ||||
9313 | // responsible for checking that if there is only a single field then that | ||||
9314 | // field is a float. | ||||
9315 | bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, | ||||
9316 | llvm::Type *&Field1Ty, | ||||
9317 | CharUnits &Field1Off, | ||||
9318 | llvm::Type *&Field2Ty, | ||||
9319 | CharUnits &Field2Off) const { | ||||
9320 | bool IsInt = Ty->isIntegralOrEnumerationType(); | ||||
9321 | bool IsFloat = Ty->isRealFloatingType(); | ||||
9322 | |||||
9323 | if (IsInt || IsFloat) { | ||||
9324 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
9325 | if (IsInt && Size > XLen) | ||||
9326 | return false; | ||||
9327 | // Can't be eligible if larger than the FP registers. Half precision isn't | ||||
9328 | // currently supported on RISC-V and the ABI hasn't been confirmed, so | ||||
9329 | // default to the integer ABI in that case. | ||||
9330 | if (IsFloat && (Size > FLen || Size < 32)) | ||||
9331 | return false; | ||||
9332 | // Can't be eligible if an integer type was already found (int+int pairs | ||||
9333 | // are not eligible). | ||||
9334 | if (IsInt && Field1Ty && Field1Ty->isIntegerTy()) | ||||
9335 | return false; | ||||
9336 | if (!Field1Ty) { | ||||
9337 | Field1Ty = CGT.ConvertType(Ty); | ||||
9338 | Field1Off = CurOff; | ||||
9339 | return true; | ||||
9340 | } | ||||
9341 | if (!Field2Ty) { | ||||
9342 | Field2Ty = CGT.ConvertType(Ty); | ||||
9343 | Field2Off = CurOff; | ||||
9344 | return true; | ||||
9345 | } | ||||
9346 | return false; | ||||
9347 | } | ||||
9348 | |||||
9349 | if (auto CTy = Ty->getAs<ComplexType>()) { | ||||
9350 | if (Field1Ty) | ||||
9351 | return false; | ||||
9352 | QualType EltTy = CTy->getElementType(); | ||||
9353 | if (getContext().getTypeSize(EltTy) > FLen) | ||||
9354 | return false; | ||||
9355 | Field1Ty = CGT.ConvertType(EltTy); | ||||
9356 | Field1Off = CurOff; | ||||
9357 | assert(CurOff.isZero() && "Unexpected offset for first field"); | ||||
9358 | Field2Ty = Field1Ty; | ||||
9359 | Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy); | ||||
9360 | return true; | ||||
9361 | } | ||||
9362 | |||||
9363 | if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) { | ||||
9364 | uint64_t ArraySize = ATy->getSize().getZExtValue(); | ||||
9365 | QualType EltTy = ATy->getElementType(); | ||||
9366 | CharUnits EltSize = getContext().getTypeSizeInChars(EltTy); | ||||
9367 | for (uint64_t i = 0; i < ArraySize; ++i) { | ||||
9368 | bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty, | ||||
9369 | Field1Off, Field2Ty, Field2Off); | ||||
9370 | if (!Ret) | ||||
9371 | return false; | ||||
9372 | CurOff += EltSize; | ||||
9373 | } | ||||
9374 | return true; | ||||
9375 | } | ||||
9376 | |||||
9377 | if (const auto *RTy = Ty->getAs<RecordType>()) { | ||||
9378 | // Structures with either a non-trivial destructor or a non-trivial | ||||
9379 | // copy constructor are not eligible for the FP calling convention. | ||||
9380 | if (getRecordArgABI(Ty, CGT.getCXXABI())) | ||||
9381 | return false; | ||||
9382 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
9383 | return true; | ||||
9384 | const RecordDecl *RD = RTy->getDecl(); | ||||
9385 | // Unions aren't eligible unless they're empty (which is caught above). | ||||
9386 | if (RD->isUnion()) | ||||
9387 | return false; | ||||
9388 | int ZeroWidthBitFieldCount = 0; | ||||
9389 | for (const FieldDecl *FD : RD->fields()) { | ||||
9390 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); | ||||
9391 | uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex()); | ||||
9392 | QualType QTy = FD->getType(); | ||||
9393 | if (FD->isBitField()) { | ||||
9394 | unsigned BitWidth = FD->getBitWidthValue(getContext()); | ||||
9395 | // Allow a bitfield with a type greater than XLen as long as the | ||||
9396 | // bitwidth is XLen or less. | ||||
9397 | if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen) | ||||
9398 | QTy = getContext().getIntTypeForBitwidth(XLen, false); | ||||
9399 | if (BitWidth == 0) { | ||||
9400 | ZeroWidthBitFieldCount++; | ||||
9401 | continue; | ||||
9402 | } | ||||
9403 | } | ||||
9404 | |||||
9405 | bool Ret = detectFPCCEligibleStructHelper( | ||||
9406 | QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits), | ||||
9407 | Field1Ty, Field1Off, Field2Ty, Field2Off); | ||||
9408 | if (!Ret) | ||||
9409 | return false; | ||||
9410 | |||||
9411 | // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp | ||||
9412 | // or int+fp structs, but are ignored for a struct with an fp field and | ||||
9413 | // any number of zero-width bitfields. | ||||
9414 | if (Field2Ty && ZeroWidthBitFieldCount > 0) | ||||
9415 | return false; | ||||
9416 | } | ||||
9417 | return Field1Ty != nullptr; | ||||
9418 | } | ||||
9419 | |||||
9420 | return false; | ||||
9421 | } | ||||
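// Editorial worked examples (not in the original source), assuming RV32
// with the ilp32d ABI (XLen == 32, FLen == 64):
//
//   struct A { float f; };                 // candidate: single fp field
//   struct B { double d; int i; };         // candidate: fp + int, int <= XLen
//   struct C { int a; int b; };            // rejected: int + int pairs
//   struct D { double d; long long l; };   // rejected: integer wider than XLen
//   struct E { _Complex double c; };       // candidate: lowered as fp + fp
//   struct F { float f; int : 0; double d; }; // rejected: zero-width
//                                          //   bitfield in an fp + fp struct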
9422 | |||||
9423 | // Determine if a struct is eligible for passing according to the floating | ||||
9424 | // point calling convention (i.e., when flattened it contains a single fp | ||||
9425 | // value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and | ||||
9426 | // NeededArgGPRs are incremented appropriately. | ||||
9427 | bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, | ||||
9428 | CharUnits &Field1Off, | ||||
9429 | llvm::Type *&Field2Ty, | ||||
9430 | CharUnits &Field2Off, | ||||
9431 | int &NeededArgGPRs, | ||||
9432 | int &NeededArgFPRs) const { | ||||
9433 | Field1Ty = nullptr; | ||||
9434 | Field2Ty = nullptr; | ||||
9435 | NeededArgGPRs = 0; | ||||
9436 | NeededArgFPRs = 0; | ||||
9437 | bool IsCandidate = detectFPCCEligibleStructHelper( | ||||
9438 | Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off); | ||||
9439 | // Not really a candidate if we have a single int but no float. | ||||
9440 | if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy()) | ||||
9441 | return false; | ||||
9442 | if (!IsCandidate) | ||||
9443 | return false; | ||||
9444 | if (Field1Ty && Field1Ty->isFloatingPointTy()) | ||||
9445 | NeededArgFPRs++; | ||||
9446 | else if (Field1Ty) | ||||
9447 | NeededArgGPRs++; | ||||
9448 | if (Field2Ty && Field2Ty->isFloatingPointTy()) | ||||
9449 | NeededArgFPRs++; | ||||
9450 | else if (Field2Ty) | ||||
9451 | NeededArgGPRs++; | ||||
9452 | return IsCandidate; | ||||
9453 | } | ||||
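// Editorial illustration (not in the original source): for the eligible
// struct B { double d; int i; } from the examples above, the flattened
// fields yield NeededArgFPRs == 1 (the double) and NeededArgGPRs == 1
// (the int); a struct of two floats would instead need two FPRs and no
// GPRs. The caller compares these counts against the remaining budget.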
9454 | |||||
9455 | // Call getCoerceAndExpand for the two-element flattened struct described by | ||||
9456 | // Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an | ||||
9457 | // appropriate coerceToType and unpaddedCoerceToType. | ||||
9458 | ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct( | ||||
9459 | llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty, | ||||
9460 | CharUnits Field2Off) const { | ||||
9461 | SmallVector<llvm::Type *, 3> CoerceElts; | ||||
9462 | SmallVector<llvm::Type *, 2> UnpaddedCoerceElts; | ||||
9463 | if (!Field1Off.isZero()) | ||||
9464 | CoerceElts.push_back(llvm::ArrayType::get( | ||||
9465 | llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity())); | ||||
9466 | |||||
9467 | CoerceElts.push_back(Field1Ty); | ||||
9468 | UnpaddedCoerceElts.push_back(Field1Ty); | ||||
9469 | |||||
9470 | if (!Field2Ty) { | ||||
9471 | return ABIArgInfo::getCoerceAndExpand( | ||||
9472 | llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()), | ||||
9473 | UnpaddedCoerceElts[0]); | ||||
9474 | } | ||||
9475 | |||||
9476 | CharUnits Field2Align = | ||||
9477 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty)); | ||||
9478 | CharUnits Field1Size = | ||||
9479 | CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty)); | ||||
9480 | CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align); | ||||
9481 | |||||
9482 | CharUnits Padding = CharUnits::Zero(); | ||||
9483 | if (Field2Off > Field2OffNoPadNoPack) | ||||
9484 | Padding = Field2Off - Field2OffNoPadNoPack; | ||||
9485 | else if (Field2Off != Field2Align && Field2Off > Field1Size) | ||||
9486 | Padding = Field2Off - Field1Size; | ||||
9487 | |||||
9488 | bool IsPacked = !Field2Off.isMultipleOf(Field2Align); | ||||
9489 | |||||
9490 | if (!Padding.isZero()) | ||||
9491 | CoerceElts.push_back(llvm::ArrayType::get( | ||||
9492 | llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity())); | ||||
9493 | |||||
9494 | CoerceElts.push_back(Field2Ty); | ||||
9495 | UnpaddedCoerceElts.push_back(Field2Ty); | ||||
9496 | |||||
9497 | auto CoerceToType = | ||||
9498 | llvm::StructType::get(getVMContext(), CoerceElts, IsPacked); | ||||
9499 | auto UnpaddedCoerceToType = | ||||
9500 | llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked); | ||||
9501 | |||||
9502 | return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType); | ||||
9503 | } | ||||
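// Editorial worked example (not in the original source), assuming lp64d:
//
//   struct { char c; float f; };   // Field1 = i8 @ 0, Field2 = float @ 4
//
// Field1's store size is 1 byte and float's ABI alignment is 4, so the
// natural second-field offset is 4, no explicit padding array is needed,
// and the struct is not packed; the coerced type is simply { i8, float }.
// Only when the source offset exceeds the natural offset (e.g. an
// over-aligned second field) does an [N x i8] padding element appear.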
9504 | |||||
9505 | ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed, | ||||
9506 | int &ArgGPRsLeft, | ||||
9507 | int &ArgFPRsLeft) const { | ||||
9508 | assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow"); | ||||
9509 | Ty = useFirstFieldIfTransparentUnion(Ty); | ||||
9510 | |||||
9511 | // Structures with either a non-trivial destructor or a non-trivial | ||||
9512 | // copy constructor are always passed indirectly. | ||||
9513 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { | ||||
9514 | if (ArgGPRsLeft) | ||||
9515 | ArgGPRsLeft -= 1; | ||||
9516 | return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == | ||||
9517 | CGCXXABI::RAA_DirectInMemory); | ||||
9518 | } | ||||
9519 | |||||
9520 | // Ignore empty structs/unions. | ||||
9521 | if (isEmptyRecord(getContext(), Ty, true)) | ||||
9522 | return ABIArgInfo::getIgnore(); | ||||
9523 | |||||
9524 | uint64_t Size = getContext().getTypeSize(Ty); | ||||
9525 | |||||
9526 | // Pass floating point values via FPRs if possible. | ||||
9527 | if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) { | ||||
9528 | ArgFPRsLeft--; | ||||
9529 | return ABIArgInfo::getDirect(); | ||||
9530 | } | ||||
9531 | |||||
9532 | // Complex types for the hard float ABI must be passed direct rather than | ||||
9533 | // using CoerceAndExpand. | ||||
9534 | if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) { | ||||
9535 | QualType EltTy = Ty->getAs<ComplexType>()->getElementType(); | ||||
9536 | if (getContext().getTypeSize(EltTy) <= FLen) { | ||||
9537 | ArgFPRsLeft -= 2; | ||||
9538 | return ABIArgInfo::getDirect(); | ||||
9539 | } | ||||
9540 | } | ||||
9541 | |||||
9542 | if (IsFixed && FLen && Ty->isStructureOrClassType()) { | ||||
9543 | llvm::Type *Field1Ty = nullptr; | ||||
9544 | llvm::Type *Field2Ty = nullptr; | ||||
9545 | CharUnits Field1Off = CharUnits::Zero(); | ||||
9546 | CharUnits Field2Off = CharUnits::Zero(); | ||||
9547 | int NeededArgGPRs; | ||||
9548 | int NeededArgFPRs; | ||||
9549 | bool IsCandidate = | ||||
9550 | detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, | ||||
9551 | NeededArgGPRs, NeededArgFPRs); | ||||
9552 | if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft && | ||||
9553 | NeededArgFPRs <= ArgFPRsLeft) { | ||||
9554 | ArgGPRsLeft -= NeededArgGPRs; | ||||
9555 | ArgFPRsLeft -= NeededArgFPRs; | ||||
9556 | return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty, | ||||
9557 | Field2Off); | ||||
9558 | } | ||||
9559 | } | ||||
9560 | |||||
9561 | uint64_t NeededAlign = getContext().getTypeAlign(Ty); | ||||
9562 | bool MustUseStack = false; | ||||
9563 | // Determine the number of GPRs needed to pass the current argument | ||||
9564 | // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" | ||||
9565 | // register pairs, so may consume 3 registers. | ||||
9566 | int NeededArgGPRs = 1; | ||||
9567 | if (!IsFixed && NeededAlign == 2 * XLen) | ||||
9568 | NeededArgGPRs = 2 + (ArgGPRsLeft % 2); | ||||
9569 | else if (Size > XLen && Size <= 2 * XLen) | ||||
9570 | NeededArgGPRs = 2; | ||||
9571 | |||||
9572 | if (NeededArgGPRs > ArgGPRsLeft) { | ||||
9573 | MustUseStack = true; | ||||
9574 | NeededArgGPRs = ArgGPRsLeft; | ||||
9575 | } | ||||
9576 | |||||
9577 | ArgGPRsLeft -= NeededArgGPRs; | ||||
9578 | |||||
9579 | if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) { | ||||
9580 | // Treat an enum type as its underlying type. | ||||
9581 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) | ||||
9582 | Ty = EnumTy->getDecl()->getIntegerType(); | ||||
9583 | |||||
9584 | // All integral types are promoted to XLen width, unless passed on the | ||||
9585 | // stack. | ||||
9586 | if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) { | ||||
9587 | return extendType(Ty); | ||||
9588 | } | ||||
9589 | |||||
9590 | return ABIArgInfo::getDirect(); | ||||
9591 | } | ||||
9592 | |||||
9593 | // Aggregates which are <= 2*XLen will be passed in registers if possible, | ||||
9594 | // so coerce to integers. | ||||
9595 | if (Size <= 2 * XLen) { | ||||
9596 | unsigned Alignment = getContext().getTypeAlign(Ty); | ||||
9597 | |||||
9598 | // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is | ||||
9599 | // required, and a 2-element XLen array if only XLen alignment is required. | ||||
9600 | if (Size <= XLen) { | ||||
9601 | return ABIArgInfo::getDirect( | ||||
9602 | llvm::IntegerType::get(getVMContext(), XLen)); | ||||
9603 | } else if (Alignment == 2 * XLen) { | ||||
9604 | return ABIArgInfo::getDirect( | ||||
9605 | llvm::IntegerType::get(getVMContext(), 2 * XLen)); | ||||
9606 | } else { | ||||
9607 | return ABIArgInfo::getDirect(llvm::ArrayType::get( | ||||
9608 | llvm::IntegerType::get(getVMContext(), XLen), 2)); | ||||
9609 | } | ||||
9610 | } | ||||
9611 | return getNaturalAlignIndirect(Ty, /*ByVal=*/false); | ||||
9612 | } | ||||
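// Editorial worked examples (not in the original source), assuming RV32
// with the ilp32 soft-float ABI (XLen == 32, FLen == 0):
//
//   short                      -> extended to XLen (signext) while GPRs remain
//   long long                  -> direct; needs two GPRs (XLen < size <= 2*XLen)
//   struct { int a; }          -> coerced to a single i32
//   struct { long long a; }    -> coerced to i64 (2*XLen alignment)
//   struct { int a; int b; }   -> coerced to [2 x i32] (only XLen alignment)
//   struct { int a[3]; }       -> indirect; larger than 2*XLen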
9613 | |||||
9614 | ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const { | ||||
9615 | if (RetTy->isVoidType()) | ||||
9616 | return ABIArgInfo::getIgnore(); | ||||
9617 | |||||
9618 | int ArgGPRsLeft = 2; | ||||
9619 | int ArgFPRsLeft = FLen ? 2 : 0; | ||||
9620 | |||||
9621 | // The rules for return and argument types are the same, so defer to | ||||
9622 | // classifyArgumentType. | ||||
9623 | return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft, | ||||
9624 | ArgFPRsLeft); | ||||
9625 | } | ||||
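// Editorial note (not in the original source): the budget of two GPRs and
// two FPRs above models the a0-a1 and fa0-fa1 return registers, so e.g.
// struct { double x; double y; } is returned in two FPRs under a
// hard-float ABI, while larger aggregates fall back to an indirect
// (memory) return via the usual classifyArgumentType rules.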
9626 | |||||
9627 | Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, | ||||
9628 | QualType Ty) const { | ||||
9629 | CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8); | ||||
9630 | |||||
9631 | // Empty records are ignored for parameter passing purposes. | ||||
9632 | if (isEmptyRecord(getContext(), Ty, true)) { | ||||
9633 | Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); | ||||
9634 | Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); | ||||
9635 | return Addr; | ||||
9636 | } | ||||
9637 | |||||
9638 | std::pair<CharUnits, CharUnits> SizeAndAlign = | ||||
9639 | getContext().getTypeInfoInChars(Ty); | ||||
9640 | |||||
9641 | // Arguments bigger than 2*XLen bits (i.e. two slots) are passed indirectly. | ||||
9642 | bool IsIndirect = SizeAndAlign.first > 2 * SlotSize; | ||||
9643 | |||||
9644 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign, | ||||
9645 | SlotSize, /*AllowHigherAlign=*/true); | ||||
9646 | } | ||||
9647 | |||||
9648 | ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const { | ||||
9649 | int TySize = getContext().getTypeSize(Ty); | ||||
9650 | // RV64 ABI requires unsigned 32 bit integers to be sign extended. | ||||
9651 | if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) | ||||
9652 | return ABIArgInfo::getSignExtend(Ty); | ||||
9653 | return ABIArgInfo::getExtend(Ty); | ||||
9654 | } | ||||
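// Editorial illustration (not in the original source), assuming RV64:
//
//   int32_t  -> signext (ordinary signed promotion)
//   uint32_t -> signext (the RV64 quirk special-cased above)
//   uint16_t -> zeroext (getExtend picks zero-extension for unsigned)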
9655 | |||||
9656 | namespace { | ||||
9657 | class RISCVTargetCodeGenInfo : public TargetCodeGenInfo { | ||||
9658 | public: | ||||
9659 | RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, | ||||
9660 | unsigned FLen) | ||||
9661 | : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {} | ||||
9662 | |||||
9663 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, | ||||
9664 | CodeGen::CodeGenModule &CGM) const override { | ||||
9665 | const auto *FD = dyn_cast_or_null<FunctionDecl>(D); | ||||
9666 | if (!FD) return; | ||||
9667 | |||||
9668 | const auto *Attr = FD->getAttr<RISCVInterruptAttr>(); | ||||
9669 | if (!Attr) | ||||
9670 | return; | ||||
9671 | |||||
9672 | const char *Kind; | ||||
9673 | switch (Attr->getInterrupt()) { | ||||
9674 | case RISCVInterruptAttr::user: Kind = "user"; break; | ||||
9675 | case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break; | ||||
9676 | case RISCVInterruptAttr::machine: Kind = "machine"; break; | ||||
9677 | } | ||||
9678 | |||||
9679 | auto *Fn = cast<llvm::Function>(GV); | ||||
9680 | |||||
9681 | Fn->addFnAttr("interrupt", Kind); | ||||
9682 | } | ||||
9683 | }; | ||||
9684 | } // namespace | ||||
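// Editorial illustration (not in the original source): a hedged sketch of
// the source-level attribute this hook consumes, assuming the usual clang
// spelling for RISCVInterruptAttr:
#if 0
__attribute__((interrupt("machine")))
void trap_handler(void) {
  // Body elided; codegen attaches the IR function attribute
  // "interrupt"="machine" via the switch above.
}
#endif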
9685 | |||||
9686 | //===----------------------------------------------------------------------===// | ||||
9687 | // Driver code | ||||
9688 | //===----------------------------------------------------------------------===// | ||||
9689 | |||||
9690 | bool CodeGenModule::supportsCOMDAT() const { | ||||
9691 | return getTriple().supportsCOMDAT(); | ||||
9692 | } | ||||
9693 | |||||
9694 | const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { | ||||
9695 | if (TheTargetCodeGenInfo) | ||||
9696 | return *TheTargetCodeGenInfo; | ||||
9697 | |||||
9698 | // Helper to set the unique_ptr while still keeping the return value. | ||||
9699 | auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { | ||||
9700 | this->TheTargetCodeGenInfo.reset(P); | ||||
9701 | return *P; | ||||
9702 | }; | ||||
9703 | |||||
9704 | const llvm::Triple &Triple = getTarget().getTriple(); | ||||
9705 | switch (Triple.getArch()) { | ||||
9706 | default: | ||||
9707 | return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); | ||||
9708 | |||||
9709 | case llvm::Triple::le32: | ||||
9710 | return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); | ||||
9711 | case llvm::Triple::mips: | ||||
9712 | case llvm::Triple::mipsel: | ||||
9713 | if (Triple.getOS() == llvm::Triple::NaCl) | ||||
9714 | return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); | ||||
9715 | return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); | ||||
9716 | |||||
9717 | case llvm::Triple::mips64: | ||||
9718 | case llvm::Triple::mips64el: | ||||
9719 | return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); | ||||
9720 | |||||
9721 | case llvm::Triple::avr: | ||||
9722 | return SetCGInfo(new AVRTargetCodeGenInfo(Types)); | ||||
9723 | |||||
9724 | case llvm::Triple::aarch64: | ||||
9725 | case llvm::Triple::aarch64_be: { | ||||
9726 | AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; | ||||
9727 | if (getTarget().getABI() == "darwinpcs") | ||||
9728 | Kind = AArch64ABIInfo::DarwinPCS; | ||||
9729 | else if (Triple.isOSWindows()) | ||||
9730 | return SetCGInfo( | ||||
9731 | new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); | ||||
9732 | |||||
9733 | return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); | ||||
9734 | } | ||||
9735 | |||||
9736 | case llvm::Triple::wasm32: | ||||
9737 | case llvm::Triple::wasm64: | ||||
9738 | return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types)); | ||||
9739 | |||||
9740 | case llvm::Triple::arm: | ||||
9741 | case llvm::Triple::armeb: | ||||
9742 | case llvm::Triple::thumb: | ||||
9743 | case llvm::Triple::thumbeb: { | ||||
9744 | if (Triple.getOS() == llvm::Triple::Win32) { | ||||
9745 | return SetCGInfo( | ||||
9746 | new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); | ||||
9747 | } | ||||
9748 | |||||
9749 | ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; | ||||
9750 | StringRef ABIStr = getTarget().getABI(); | ||||
9751 | if (ABIStr == "apcs-gnu") | ||||
9752 | Kind = ARMABIInfo::APCS; | ||||
9753 | else if (ABIStr == "aapcs16") | ||||
9754 | Kind = ARMABIInfo::AAPCS16_VFP; | ||||
9755 | else if (CodeGenOpts.FloatABI == "hard" || | ||||
9756 | (CodeGenOpts.FloatABI != "soft" && | ||||
9757 | (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || | ||||
9758 | Triple.getEnvironment() == llvm::Triple::MuslEABIHF || | ||||
9759 | Triple.getEnvironment() == llvm::Triple::EABIHF))) | ||||
9760 | Kind = ARMABIInfo::AAPCS_VFP; | ||||
9761 | |||||
9762 | return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); | ||||
9763 | } | ||||
9764 | |||||
9765 | case llvm::Triple::ppc: | ||||
9766 | return SetCGInfo( | ||||
9767 | new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft" || | ||||
9768 | getTarget().hasFeature("spe"))); | ||||
9769 | case llvm::Triple::ppc64: | ||||
9770 | if (Triple.isOSBinFormatELF()) { | ||||
9771 | PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; | ||||
9772 | if (getTarget().getABI() == "elfv2") | ||||
9773 | Kind = PPC64_SVR4_ABIInfo::ELFv2; | ||||
9774 | bool HasQPX = getTarget().getABI() == "elfv1-qpx"; | ||||
9775 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; | ||||
9776 | |||||
9777 | return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, | ||||
9778 | IsSoftFloat)); | ||||
9779 | } else | ||||
9780 | return SetCGInfo(new PPC64TargetCodeGenInfo(Types)); | ||||
9781 | case llvm::Triple::ppc64le: { | ||||
9782 | assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); | ||||
9783 | PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; | ||||
9784 | if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") | ||||
9785 | Kind = PPC64_SVR4_ABIInfo::ELFv1; | ||||
9786 | bool HasQPX = getTarget().getABI() == "elfv1-qpx"; | ||||
9787 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; | ||||
9788 | |||||
9789 | return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, | ||||
9790 | IsSoftFloat)); | ||||
9791 | } | ||||
9792 | |||||
9793 | case llvm::Triple::nvptx: | ||||
9794 | case llvm::Triple::nvptx64: | ||||
9795 | return SetCGInfo(new NVPTXTargetCodeGenInfo(Types)); | ||||
9796 | |||||
9797 | case llvm::Triple::msp430: | ||||
9798 | return SetCGInfo(new MSP430TargetCodeGenInfo(Types)); | ||||
9799 | |||||
9800 | case llvm::Triple::riscv32: | ||||
9801 | case llvm::Triple::riscv64: { | ||||
9802 | StringRef ABIStr = getTarget().getABI(); | ||||
9803 | unsigned XLen = getTarget().getPointerWidth(0); | ||||
9804 | unsigned ABIFLen = 0; | ||||
9805 | if (ABIStr.endswith("f")) | ||||
9806 | ABIFLen = 32; | ||||
9807 | else if (ABIStr.endswith("d")) | ||||
9808 | ABIFLen = 64; | ||||
9809 | return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen)); | ||||
9810 | } | ||||
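// Editorial illustration (not in the original source): how the standard
// RISC-V ABI names map to the (XLen, ABIFLen) pair constructed above.
// XLen comes from the target pointer width, ABIFLen from the ABI suffix:
//
//   riscv32 + ilp32  -> (32, 0)      riscv64 + lp64  -> (64, 0)
//   riscv32 + ilp32f -> (32, 32)     riscv64 + lp64f -> (64, 32)
//   riscv32 + ilp32d -> (32, 64)     riscv64 + lp64d -> (64, 64)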
9811 | |||||
9812 | case llvm::Triple::systemz: { | ||||
9813 | bool HasVector = getTarget().getABI() == "vector"; | ||||
9814 | return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector)); | ||||
9815 | } | ||||
9816 | |||||
9817 | case llvm::Triple::tce: | ||||
9818 | case llvm::Triple::tcele: | ||||
9819 | return SetCGInfo(new TCETargetCodeGenInfo(Types)); | ||||
9820 | |||||
9821 | case llvm::Triple::x86: { | ||||
9822 | bool IsDarwinVectorABI = Triple.isOSDarwin(); | ||||
9823 | bool RetSmallStructInRegABI = | ||||
9824 | X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); | ||||
9825 | bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); | ||||
9826 | |||||
9827 | if (Triple.getOS() == llvm::Triple::Win32) { | ||||
9828 | return SetCGInfo(new WinX86_32TargetCodeGenInfo( | ||||
9829 | Types, IsDarwinVectorABI, RetSmallStructInRegABI, | ||||
9830 | IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); | ||||
9831 | } else { | ||||
9832 | return SetCGInfo(new X86_32TargetCodeGenInfo( | ||||
9833 | Types, IsDarwinVectorABI, RetSmallStructInRegABI, | ||||
9834 | IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, | ||||
9835 | CodeGenOpts.FloatABI == "soft")); | ||||
9836 | } | ||||
9837 | } | ||||
9838 | |||||
9839 | case llvm::Triple::x86_64: { | ||||
9840 | StringRef ABI = getTarget().getABI(); | ||||
9841 | X86AVXABILevel AVXLevel = | ||||
9842 | (ABI == "avx512" | ||||
9843 | ? X86AVXABILevel::AVX512 | ||||
9844 | : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None); | ||||
9845 | |||||
9846 | switch (Triple.getOS()) { | ||||
9847 | case llvm::Triple::Win32: | ||||
9848 | return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); | ||||
9849 | default: | ||||
9850 | return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel)); | ||||
9851 | } | ||||
9852 | } | ||||
9853 | case llvm::Triple::hexagon: | ||||
9854 | return SetCGInfo(new HexagonTargetCodeGenInfo(Types)); | ||||
9855 | case llvm::Triple::lanai: | ||||
9856 | return SetCGInfo(new LanaiTargetCodeGenInfo(Types)); | ||||
9857 | case llvm::Triple::r600: | ||||
9858 | return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); | ||||
9859 | case llvm::Triple::amdgcn: | ||||
9860 | return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); | ||||
9861 | case llvm::Triple::sparc: | ||||
9862 | return SetCGInfo(new SparcV8TargetCodeGenInfo(Types)); | ||||
9863 | case llvm::Triple::sparcv9: | ||||
9864 | return SetCGInfo(new SparcV9TargetCodeGenInfo(Types)); | ||||
9865 | case llvm::Triple::xcore: | ||||
9866 | return SetCGInfo(new XCoreTargetCodeGenInfo(Types)); | ||||
9867 | case llvm::Triple::arc: | ||||
9868 | return SetCGInfo(new ARCTargetCodeGenInfo(Types)); | ||||
9869 | case llvm::Triple::spir: | ||||
9870 | case llvm::Triple::spir64: | ||||
9871 | return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); | ||||
9872 | } | ||||
9873 | } | ||||
9874 | |||||
9875 | /// Create an OpenCL kernel for an enqueued block. | ||||
9876 | /// | ||||
9877 | /// The kernel has the same function type as the block invoke function. Its | ||||
9878 | /// name is the name of the block invoke function postfixed with "_kernel". | ||||
9879 | /// It simply calls the block invoke function then returns. | ||||
9880 | llvm::Function * | ||||
9881 | TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF, | ||||
9882 | llvm::Function *Invoke, | ||||
9883 | llvm::Value *BlockLiteral) const { | ||||
9884 | auto *InvokeFT = Invoke->getFunctionType(); | ||||
9885 | llvm::SmallVector<llvm::Type *, 2> ArgTys; | ||||
9886 | for (auto &P : InvokeFT->params()) | ||||
9887 | ArgTys.push_back(P); | ||||
9888 | auto &C = CGF.getLLVMContext(); | ||||
9889 | std::string Name = Invoke->getName().str() + "_kernel"; | ||||
9890 | auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); | ||||
9891 | auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, | ||||
9892 | &CGF.CGM.getModule()); | ||||
9893 | auto IP = CGF.Builder.saveIP(); | ||||
9894 | auto *BB = llvm::BasicBlock::Create(C, "entry", F); | ||||
9895 | auto &Builder = CGF.Builder; | ||||
9896 | Builder.SetInsertPoint(BB); | ||||
9897 | llvm::SmallVector<llvm::Value *, 2> Args; | ||||
9898 | for (auto &A : F->args()) | ||||
9899 | Args.push_back(&A); | ||||
9900 | Builder.CreateCall(Invoke, Args); | ||||
9901 | Builder.CreateRetVoid(); | ||||
9902 | Builder.restoreIP(IP); | ||||
9903 | return F; | ||||
9904 | } | ||||
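// Editorial illustration (not in the original source): a hedged sketch of
// the wrapper this emits, for a hypothetical invoke function @blk_invoke:
//
//   define internal void @blk_invoke_kernel(<invoke param types...>) {
//   entry:
//     call void @blk_invoke(<forwarded arguments...>)
//     ret void
//   }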
9905 | |||||
9906 | /// Create an OpenCL kernel for an enqueued block. | ||||
9907 | /// | ||||
9908 | /// The type of the first argument (the block literal) is the struct type | ||||
9909 | /// of the block literal instead of a pointer type. The first argument | ||||
9910 | /// (block literal) is passed directly by value to the kernel. The kernel | ||||
9911 | /// allocates the same type of struct on stack and stores the block literal | ||||
9912 | /// to it and passes its pointer to the block invoke function. The kernel | ||||
9913 | /// has "enqueued-block" function attribute and kernel argument metadata. | ||||
9914 | llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel( | ||||
9915 | CodeGenFunction &CGF, llvm::Function *Invoke, | ||||
9916 | llvm::Value *BlockLiteral) const { | ||||
9917 | auto &Builder = CGF.Builder; | ||||
9918 | auto &C = CGF.getLLVMContext(); | ||||
9919 | |||||
9920 | auto *BlockTy = BlockLiteral->getType()->getPointerElementType(); | ||||
9921 | auto *InvokeFT = Invoke->getFunctionType(); | ||||
9922 | llvm::SmallVector<llvm::Type *, 2> ArgTys; | ||||
9923 | llvm::SmallVector<llvm::Metadata *, 8> AddressQuals; | ||||
9924 | llvm::SmallVector<llvm::Metadata *, 8> AccessQuals; | ||||
9925 | llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames; | ||||
9926 | llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames; | ||||
9927 | llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals; | ||||
9928 | llvm::SmallVector<llvm::Metadata *, 8> ArgNames; | ||||
9929 | |||||
9930 | ArgTys.push_back(BlockTy); | ||||
9931 | ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); | ||||
9932 | AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0))); | ||||
9933 | ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); | ||||
9934 | ArgTypeQuals.push_back(llvm::MDString::get(C, "")); | ||||
9935 | AccessQuals.push_back(llvm::MDString::get(C, "none")); | ||||
9936 | ArgNames.push_back(llvm::MDString::get(C, "block_literal")); | ||||
9937 | for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) { | ||||
9938 | ArgTys.push_back(InvokeFT->getParamType(I)); | ||||
9939 | ArgTypeNames.push_back(llvm::MDString::get(C, "void*")); | ||||
9940 | AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3))); | ||||
9941 | AccessQuals.push_back(llvm::MDString::get(C, "none")); | ||||
9942 | ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*")); | ||||
9943 | ArgTypeQuals.push_back(llvm::MDString::get(C, "")); | ||||
9944 | ArgNames.push_back( | ||||
9945 | llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str())); | ||||
9946 | } | ||||
9947 | std::string Name = Invoke->getName().str() + "_kernel"; | ||||
9948 | auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); | ||||
9949 | auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, | ||||
9950 | &CGF.CGM.getModule()); | ||||
9951 | F->addFnAttr("enqueued-block"); | ||||
9952 | auto IP = CGF.Builder.saveIP(); | ||||
9953 | auto *BB = llvm::BasicBlock::Create(C, "entry", F); | ||||
9954 | Builder.SetInsertPoint(BB); | ||||
9955 | unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy); | ||||
9956 | auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr); | ||||
9957 | BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign)); | ||||
9958 | Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign); | ||||
9959 | auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0)); | ||||
9960 | llvm::SmallVector<llvm::Value *, 2> Args; | ||||
9961 | Args.push_back(Cast); | ||||
9962 | for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I) | ||||
9963 | Args.push_back(I); | ||||
9964 | Builder.CreateCall(Invoke, Args); | ||||
9965 | Builder.CreateRetVoid(); | ||||
9966 | Builder.restoreIP(IP); | ||||
9967 | |||||
9968 | F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals)); | ||||
9969 | F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals)); | ||||
9970 | F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames)); | ||||
9971 | F->setMetadata("kernel_arg_base_type", | ||||
9972 | llvm::MDNode::get(C, ArgBaseTypeNames)); | ||||
9973 | F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals)); | ||||
9974 | if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata) | ||||
9975 | F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames)); | ||||
9976 | |||||
9977 | return F; | ||||
9978 | } |
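// Editorial illustration (not in the original source): unlike the generic
// wrapper above, the AMDGPU kernel takes the block literal struct by value,
// so its body is roughly:
//
//   %block = alloca <block struct type>          ; preferred alignment
//   store <block struct type> %arg0, %block      ; spill the by-value arg
//   %cast = pointercast %block to the invoke's first parameter type
//   call void @blk_invoke(%cast, <remaining args...>)
//   ret void
//
// plus the "enqueued-block" function attribute and the kernel_arg_*
// metadata nodes built in the loops above.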
1 | //===- ASTContext.h - Context to hold long-lived AST nodes ------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// Defines the clang::ASTContext interface. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_CLANG_AST_ASTCONTEXT_H |
15 | #define LLVM_CLANG_AST_ASTCONTEXT_H |
16 | |
17 | #include "clang/AST/ASTContextAllocate.h" |
18 | #include "clang/AST/ASTTypeTraits.h" |
19 | #include "clang/AST/CanonicalType.h" |
20 | #include "clang/AST/CommentCommandTraits.h" |
21 | #include "clang/AST/ComparisonCategories.h" |
22 | #include "clang/AST/Decl.h" |
23 | #include "clang/AST/DeclBase.h" |
24 | #include "clang/AST/DeclarationName.h" |
25 | #include "clang/AST/Expr.h" |
26 | #include "clang/AST/ExternalASTSource.h" |
27 | #include "clang/AST/NestedNameSpecifier.h" |
28 | #include "clang/AST/PrettyPrinter.h" |
29 | #include "clang/AST/RawCommentList.h" |
30 | #include "clang/AST/TemplateBase.h" |
31 | #include "clang/AST/TemplateName.h" |
32 | #include "clang/AST/Type.h" |
33 | #include "clang/Basic/AddressSpaces.h" |
34 | #include "clang/Basic/AttrKinds.h" |
35 | #include "clang/Basic/IdentifierTable.h" |
36 | #include "clang/Basic/LLVM.h" |
37 | #include "clang/Basic/LangOptions.h" |
38 | #include "clang/Basic/Linkage.h" |
39 | #include "clang/Basic/OperatorKinds.h" |
40 | #include "clang/Basic/PartialDiagnostic.h" |
41 | #include "clang/Basic/SanitizerBlacklist.h" |
42 | #include "clang/Basic/SourceLocation.h" |
43 | #include "clang/Basic/Specifiers.h" |
44 | #include "clang/Basic/TargetInfo.h" |
45 | #include "clang/Basic/XRayLists.h" |
46 | #include "llvm/ADT/APSInt.h" |
47 | #include "llvm/ADT/ArrayRef.h" |
48 | #include "llvm/ADT/DenseMap.h" |
49 | #include "llvm/ADT/FoldingSet.h" |
50 | #include "llvm/ADT/IntrusiveRefCntPtr.h" |
51 | #include "llvm/ADT/MapVector.h" |
52 | #include "llvm/ADT/None.h" |
53 | #include "llvm/ADT/Optional.h" |
54 | #include "llvm/ADT/PointerIntPair.h" |
55 | #include "llvm/ADT/PointerUnion.h" |
56 | #include "llvm/ADT/SmallVector.h" |
57 | #include "llvm/ADT/StringMap.h" |
58 | #include "llvm/ADT/StringRef.h" |
59 | #include "llvm/ADT/TinyPtrVector.h" |
60 | #include "llvm/ADT/Triple.h" |
61 | #include "llvm/ADT/iterator_range.h" |
62 | #include "llvm/Support/AlignOf.h" |
63 | #include "llvm/Support/Allocator.h" |
64 | #include "llvm/Support/Casting.h" |
65 | #include "llvm/Support/Compiler.h" |
66 | #include <cassert> |
67 | #include <cstddef> |
68 | #include <cstdint> |
69 | #include <iterator> |
70 | #include <memory> |
71 | #include <string> |
72 | #include <type_traits> |
73 | #include <utility> |
74 | #include <vector> |
75 | |
76 | namespace llvm { |
77 | |
78 | struct fltSemantics; |
79 | |
80 | } // namespace llvm |
81 | |
82 | namespace clang { |
83 | |
84 | class APFixedPoint; |
85 | class APValue; |
86 | class ASTMutationListener; |
87 | class ASTRecordLayout; |
88 | class AtomicExpr; |
89 | class BlockExpr; |
90 | class BuiltinTemplateDecl; |
91 | class CharUnits; |
92 | class CXXABI; |
93 | class CXXConstructorDecl; |
94 | class CXXMethodDecl; |
95 | class CXXRecordDecl; |
96 | class DiagnosticsEngine; |
97 | class Expr; |
98 | class FixedPointSemantics; |
99 | class MangleContext; |
100 | class MangleNumberingContext; |
101 | class MaterializeTemporaryExpr; |
102 | class MemberSpecializationInfo; |
103 | class Module; |
104 | class ObjCCategoryDecl; |
105 | class ObjCCategoryImplDecl; |
106 | class ObjCContainerDecl; |
107 | class ObjCImplDecl; |
108 | class ObjCImplementationDecl; |
109 | class ObjCInterfaceDecl; |
110 | class ObjCIvarDecl; |
111 | class ObjCMethodDecl; |
112 | class ObjCPropertyDecl; |
113 | class ObjCPropertyImplDecl; |
114 | class ObjCProtocolDecl; |
115 | class ObjCTypeParamDecl; |
116 | class Preprocessor; |
117 | class Stmt; |
118 | class StoredDeclsMap; |
119 | class TemplateDecl; |
120 | class TemplateParameterList; |
121 | class TemplateTemplateParmDecl; |
122 | class TemplateTypeParmDecl; |
123 | class UnresolvedSetIterator; |
124 | class UsingShadowDecl; |
125 | class VarTemplateDecl; |
126 | class VTableContextBase; |
127 | |
128 | namespace Builtin { |
129 | |
130 | class Context; |
131 | |
132 | } // namespace Builtin |
133 | |
134 | enum BuiltinTemplateKind : int; |
135 | |
136 | namespace comments { |
137 | |
138 | class FullComment; |
139 | |
140 | } // namespace comments |
141 | |
142 | namespace interp { |
143 | |
144 | class Context; |
145 | |
146 | } // namespace interp |
147 | |
148 | struct TypeInfo { |
149 | uint64_t Width = 0; |
150 | unsigned Align = 0; |
151 | bool AlignIsRequired : 1; |
152 | |
153 | TypeInfo() : AlignIsRequired(false) {} |
154 | TypeInfo(uint64_t Width, unsigned Align, bool AlignIsRequired) |
155 | : Width(Width), Align(Align), AlignIsRequired(AlignIsRequired) {} |
156 | }; |
157 | |
158 | /// Holds long-lived AST nodes (such as types and decls) that can be |
159 | /// referred to throughout the semantic analysis of a file. |
160 | class ASTContext : public RefCountedBase<ASTContext> { |
161 | public: |
162 | /// Copy initialization expr of a __block variable and a boolean flag that |
163 | /// indicates whether the expression can throw. |
164 | struct BlockVarCopyInit { |
165 | BlockVarCopyInit() = default; |
166 | BlockVarCopyInit(Expr *CopyExpr, bool CanThrow) |
167 | : ExprAndFlag(CopyExpr, CanThrow) {} |
168 | void setExprAndFlag(Expr *CopyExpr, bool CanThrow) { |
169 | ExprAndFlag.setPointerAndInt(CopyExpr, CanThrow); |
170 | } |
171 | Expr *getCopyExpr() const { return ExprAndFlag.getPointer(); } |
172 | bool canThrow() const { return ExprAndFlag.getInt(); } |
173 | llvm::PointerIntPair<Expr *, 1, bool> ExprAndFlag; |
174 | }; |
175 | |
176 | private: |
177 | friend class NestedNameSpecifier; |
178 | |
179 | mutable SmallVector<Type *, 0> Types; |
180 | mutable llvm::FoldingSet<ExtQuals> ExtQualNodes; |
181 | mutable llvm::FoldingSet<ComplexType> ComplexTypes; |
182 | mutable llvm::FoldingSet<PointerType> PointerTypes; |
183 | mutable llvm::FoldingSet<AdjustedType> AdjustedTypes; |
184 | mutable llvm::FoldingSet<BlockPointerType> BlockPointerTypes; |
185 | mutable llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes; |
186 | mutable llvm::FoldingSet<RValueReferenceType> RValueReferenceTypes; |
187 | mutable llvm::FoldingSet<MemberPointerType> MemberPointerTypes; |
188 | mutable llvm::FoldingSet<ConstantArrayType> ConstantArrayTypes; |
189 | mutable llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes; |
190 | mutable std::vector<VariableArrayType*> VariableArrayTypes; |
191 | mutable llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes; |
192 | mutable llvm::FoldingSet<DependentSizedExtVectorType> |
193 | DependentSizedExtVectorTypes; |
194 | mutable llvm::FoldingSet<DependentAddressSpaceType> |
195 | DependentAddressSpaceTypes; |
196 | mutable llvm::FoldingSet<VectorType> VectorTypes; |
197 | mutable llvm::FoldingSet<DependentVectorType> DependentVectorTypes; |
198 | mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes; |
199 | mutable llvm::ContextualFoldingSet<FunctionProtoType, ASTContext&> |
200 | FunctionProtoTypes; |
201 | mutable llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes; |
202 | mutable llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes; |
203 | mutable llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes; |
204 | mutable llvm::FoldingSet<ObjCTypeParamType> ObjCTypeParamTypes; |
205 | mutable llvm::FoldingSet<SubstTemplateTypeParmType> |
206 | SubstTemplateTypeParmTypes; |
207 | mutable llvm::FoldingSet<SubstTemplateTypeParmPackType> |
208 | SubstTemplateTypeParmPackTypes; |
209 | mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&> |
210 | TemplateSpecializationTypes; |
211 | mutable llvm::FoldingSet<ParenType> ParenTypes; |
212 | mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes; |
213 | mutable llvm::FoldingSet<DependentNameType> DependentNameTypes; |
214 | mutable llvm::ContextualFoldingSet<DependentTemplateSpecializationType, |
215 | ASTContext&> |
216 | DependentTemplateSpecializationTypes; |
217 | llvm::FoldingSet<PackExpansionType> PackExpansionTypes; |
218 | mutable llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes; |
219 | mutable llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes; |
220 | mutable llvm::FoldingSet<DependentUnaryTransformType> |
221 | DependentUnaryTransformTypes; |
222 | mutable llvm::FoldingSet<AutoType> AutoTypes; |
223 | mutable llvm::FoldingSet<DeducedTemplateSpecializationType> |
224 | DeducedTemplateSpecializationTypes; |
225 | mutable llvm::FoldingSet<AtomicType> AtomicTypes; |
226 | llvm::FoldingSet<AttributedType> AttributedTypes; |
227 | mutable llvm::FoldingSet<PipeType> PipeTypes; |
228 | |
229 | mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames; |
230 | mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames; |
231 | mutable llvm::FoldingSet<SubstTemplateTemplateParmStorage> |
232 | SubstTemplateTemplateParms; |
233 | mutable llvm::ContextualFoldingSet<SubstTemplateTemplateParmPackStorage, |
234 | ASTContext&> |
235 | SubstTemplateTemplateParmPacks; |
236 | |
237 | /// The set of nested name specifiers. |
238 | /// |
239 | /// This set is managed by the NestedNameSpecifier class. |
240 | mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers; |
241 | mutable NestedNameSpecifier *GlobalNestedNameSpecifier = nullptr; |
242 | |
243 | /// A cache mapping from RecordDecls to ASTRecordLayouts. |
244 | /// |
245 | /// This is lazily created. This is intentionally not serialized. |
246 | mutable llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*> |
247 | ASTRecordLayouts; |
248 | mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*> |
249 | ObjCLayouts; |
250 | |
251 | /// A cache from types to size and alignment information. |
252 | using TypeInfoMap = llvm::DenseMap<const Type *, struct TypeInfo>; |
253 | mutable TypeInfoMap MemoizedTypeInfo; |
254 | |
255 | /// A cache from types to unadjusted alignment information. Only ARM and |
256 | /// AArch64 targets need this information, keeping it separate prevents |
257 | /// imposing overhead on TypeInfo size. |
258 | using UnadjustedAlignMap = llvm::DenseMap<const Type *, unsigned>; |
259 | mutable UnadjustedAlignMap MemoizedUnadjustedAlign; |
260 | |
261 | /// A cache mapping from CXXRecordDecls to key functions. |
262 | llvm::DenseMap<const CXXRecordDecl*, LazyDeclPtr> KeyFunctions; |
263 | |
264 | /// Mapping from ObjCContainers to their ObjCImplementations. |
265 | llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*> ObjCImpls; |
266 | |
267 | /// Mapping from ObjCMethod to its duplicate declaration in the same |
268 | /// interface. |
269 | llvm::DenseMap<const ObjCMethodDecl*,const ObjCMethodDecl*> ObjCMethodRedecls; |
270 | |
271 | /// Mapping from __block VarDecls to BlockVarCopyInit. |
272 | llvm::DenseMap<const VarDecl *, BlockVarCopyInit> BlockVarCopyInits; |
273 | |
274 | /// Mapping from materialized temporaries with static storage duration |
275 | /// that appear in constant initializers to their evaluated values. These are |
276 | /// allocated in a std::map because their address must be stable. |
277 | llvm::DenseMap<const MaterializeTemporaryExpr *, APValue *> |
278 | MaterializedTemporaryValues; |
279 | |
280 | /// Used to cleanups APValues stored in the AST. |
281 | mutable llvm::SmallVector<APValue *, 0> APValueCleanups; |
282 | |
283 | /// A cache mapping a string value to a StringLiteral object with the same |
284 | /// value. |
285 | /// |
286 | /// This is lazily created. This is intentionally not serialized. |
287 | mutable llvm::StringMap<StringLiteral *> StringLiteralCache; |
288 | |
289 | /// Representation of a "canonical" template template parameter that |
290 | /// is used in canonical template names. |
291 | class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode { |
292 | TemplateTemplateParmDecl *Parm; |
293 | |
294 | public: |
295 | CanonicalTemplateTemplateParm(TemplateTemplateParmDecl *Parm) |
296 | : Parm(Parm) {} |
297 | |
298 | TemplateTemplateParmDecl *getParam() const { return Parm; } |
299 | |
300 | void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Parm); } |
301 | |
302 | static void Profile(llvm::FoldingSetNodeID &ID, |
303 | TemplateTemplateParmDecl *Parm); |
304 | }; |
305 | mutable llvm::FoldingSet<CanonicalTemplateTemplateParm> |
306 | CanonTemplateTemplateParms; |
307 | |
308 | TemplateTemplateParmDecl * |
309 | getCanonicalTemplateTemplateParmDecl(TemplateTemplateParmDecl *TTP) const; |
310 | |
311 | /// The typedef for the __int128_t type. |
312 | mutable TypedefDecl *Int128Decl = nullptr; |
313 | |
314 | /// The typedef for the __uint128_t type. |
315 | mutable TypedefDecl *UInt128Decl = nullptr; |
316 | |
317 | /// The typedef for the target specific predefined |
318 | /// __builtin_va_list type. |
319 | mutable TypedefDecl *BuiltinVaListDecl = nullptr; |
320 | |
321 | /// The typedef for the predefined \c __builtin_ms_va_list type. |
322 | mutable TypedefDecl *BuiltinMSVaListDecl = nullptr; |
323 | |
324 | /// The typedef for the predefined \c id type. |
325 | mutable TypedefDecl *ObjCIdDecl = nullptr; |
326 | |
327 | /// The typedef for the predefined \c SEL type. |
328 | mutable TypedefDecl *ObjCSelDecl = nullptr; |
329 | |
330 | /// The typedef for the predefined \c Class type. |
331 | mutable TypedefDecl *ObjCClassDecl = nullptr; |
332 | |
333 | /// The typedef for the predefined \c Protocol class in Objective-C. |
334 | mutable ObjCInterfaceDecl *ObjCProtocolClassDecl = nullptr; |
335 | |
336 | /// The typedef for the predefined 'BOOL' type. |
337 | mutable TypedefDecl *BOOLDecl = nullptr; |
338 | |
339 | // Typedefs which may be provided defining the structure of Objective-C |
340 | // pseudo-builtins |
341 | QualType ObjCIdRedefinitionType; |
342 | QualType ObjCClassRedefinitionType; |
343 | QualType ObjCSelRedefinitionType; |
344 | |
345 | /// The identifier 'bool'. |
346 | mutable IdentifierInfo *BoolName = nullptr; |
347 | |
348 | /// The identifier 'NSObject'. |
349 | mutable IdentifierInfo *NSObjectName = nullptr; |
350 | |
351 | /// The identifier 'NSCopying'. |
352 | IdentifierInfo *NSCopyingName = nullptr; |
353 | |
354 | /// The identifier '__make_integer_seq'. |
355 | mutable IdentifierInfo *MakeIntegerSeqName = nullptr; |
356 | |
357 | /// The identifier '__type_pack_element'. |
358 | mutable IdentifierInfo *TypePackElementName = nullptr; |
359 | |
360 | QualType ObjCConstantStringType; |
361 | mutable RecordDecl *CFConstantStringTagDecl = nullptr; |
362 | mutable TypedefDecl *CFConstantStringTypeDecl = nullptr; |
363 | |
364 | mutable QualType ObjCSuperType; |
365 | |
366 | QualType ObjCNSStringType; |
367 | |
368 | /// The typedef declaration for the Objective-C "instancetype" type. |
369 | TypedefDecl *ObjCInstanceTypeDecl = nullptr; |
370 | |
371 | /// The type for the C FILE type. |
372 | TypeDecl *FILEDecl = nullptr; |
373 | |
374 | /// The type for the C jmp_buf type. |
375 | TypeDecl *jmp_bufDecl = nullptr; |
376 | |
377 | /// The type for the C sigjmp_buf type. |
378 | TypeDecl *sigjmp_bufDecl = nullptr; |
379 | |
380 | /// The type for the C ucontext_t type. |
381 | TypeDecl *ucontext_tDecl = nullptr; |
382 | |
383 | /// Type for the Block descriptor for Blocks CodeGen. |
384 | /// |
385 | /// Since this is only used for generation of debug info, it is not |
386 | /// serialized. |
387 | mutable RecordDecl *BlockDescriptorType = nullptr; |
388 | |
389 | /// Type for the Block descriptor for Blocks CodeGen. |
390 | /// |
391 | /// Since this is only used for generation of debug info, it is not |
392 | /// serialized. |
393 | mutable RecordDecl *BlockDescriptorExtendedType = nullptr; |
394 | |
395 | /// Declaration for the CUDA cudaConfigureCall function. |
396 | FunctionDecl *cudaConfigureCallDecl = nullptr; |
397 | |
398 | /// Keeps track of all declaration attributes. |
399 | /// |
400 | /// Since so few decls have attrs, we keep them in a hash map instead of |
401 | /// wasting space in the Decl class. |
402 | llvm::DenseMap<const Decl*, AttrVec*> DeclAttrs; |
403 | |
404 | /// A mapping from non-redeclarable declarations in modules that were |
405 | /// merged with other declarations to the canonical declaration that they were |
406 | /// merged into. |
407 | llvm::DenseMap<Decl*, Decl*> MergedDecls; |
408 | |
409 | /// A mapping from a defining declaration to a list of modules (other |
410 | /// than the owning module of the declaration) that contain merged |
411 | /// definitions of that entity. |
412 | llvm::DenseMap<NamedDecl*, llvm::TinyPtrVector<Module*>> MergedDefModules; |
413 | |
414 | /// Initializers for a module, in order. Each Decl will be either |
415 | /// something that has a semantic effect on startup (such as a variable with |
416 | /// a non-constant initializer), or an ImportDecl (which recursively triggers |
417 | /// initialization of another module). |
418 | struct PerModuleInitializers { |
419 | llvm::SmallVector<Decl*, 4> Initializers; |
420 | llvm::SmallVector<uint32_t, 4> LazyInitializers; |
421 | |
422 | void resolve(ASTContext &Ctx); |
423 | }; |
424 | llvm::DenseMap<Module*, PerModuleInitializers*> ModuleInitializers; |
425 | |
426 | ASTContext &this_() { return *this; } |
427 | |
428 | public: |
429 | /// A type synonym for the TemplateOrInstantiation mapping. |
430 | using TemplateOrSpecializationInfo = |
431 | llvm::PointerUnion<VarTemplateDecl *, MemberSpecializationInfo *>; |
432 | |
433 | private: |
434 | friend class ASTDeclReader; |
435 | friend class ASTReader; |
436 | friend class ASTWriter; |
437 | friend class CXXRecordDecl; |
438 | |
439 | /// A mapping to contain the template or declaration that |
440 | /// a variable declaration describes or was instantiated from, |
441 | /// respectively. |
442 | /// |
443 | /// For non-templates, this value will be NULL. For variable |
444 | /// declarations that describe a variable template, this will be a |
445 | /// pointer to a VarTemplateDecl. For static data members |
446 | /// of class template specializations, this will be the |
447 | /// MemberSpecializationInfo referring to the member variable that was |
448 | /// instantiated or specialized. Thus, the mapping will keep track of |
449 | /// the static data member templates from which static data members of |
450 | /// class template specializations were instantiated. |
451 | /// |
452 | /// Given the following example: |
453 | /// |
454 | /// \code |
455 | /// template<typename T> |
456 | /// struct X { |
457 | /// static T value; |
458 | /// }; |
459 | /// |
460 | /// template<typename T> |
461 | /// T X<T>::value = T(17); |
462 | /// |
463 | /// int *x = &X<int>::value; |
464 | /// \endcode |
465 | /// |
466 | /// This mapping will contain an entry that maps from the VarDecl for |
467 | /// X<int>::value to the corresponding VarDecl for X<T>::value (within the |
468 | /// class template X) and will be marked TSK_ImplicitInstantiation. |
469 | llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo> |
470 | TemplateOrInstantiation; |
471 | |
472 | /// Keeps track of the declaration from which a using declaration was |
473 | /// created during instantiation. |
474 | /// |
475 | /// The source and target declarations are always a UsingDecl, an |
476 | /// UnresolvedUsingValueDecl, or an UnresolvedUsingTypenameDecl. |
477 | /// |
478 | /// For example: |
479 | /// \code |
480 | /// template<typename T> |
481 | /// struct A { |
482 | /// void f(); |
483 | /// }; |
484 | /// |
485 | /// template<typename T> |
486 | /// struct B : A<T> { |
487 | /// using A<T>::f; |
488 | /// }; |
489 | /// |
490 | /// template struct B<int>; |
491 | /// \endcode |
492 | /// |
493 | /// This mapping will contain an entry that maps from the UsingDecl in |
494 | /// B<int> to the UnresolvedUsingValueDecl in B<T>.
495 | llvm::DenseMap<NamedDecl *, NamedDecl *> InstantiatedFromUsingDecl; |
496 | |
497 | llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*> |
498 | InstantiatedFromUsingShadowDecl; |
499 | |
500 | llvm::DenseMap<FieldDecl *, FieldDecl *> InstantiatedFromUnnamedFieldDecl; |
501 | |
502 | /// Mapping that stores the methods overridden by a given C++ |
503 | /// member function. |
504 | /// |
505 | /// Since most C++ member functions aren't virtual and therefore |
506 | /// don't override anything, we store the overridden functions in |
507 | /// this map on the side rather than within the CXXMethodDecl structure. |
508 | using CXXMethodVector = llvm::TinyPtrVector<const CXXMethodDecl *>; |
509 | llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector> OverriddenMethods; |
510 | |
511 | /// Mapping from each declaration context to its corresponding |
512 | /// mangling numbering context (used for constructs like lambdas which |
513 | /// need to be consistently numbered for the mangler). |
514 | llvm::DenseMap<const DeclContext *, std::unique_ptr<MangleNumberingContext>> |
515 | MangleNumberingContexts; |
516 | |
517 | /// Side-table of mangling numbers for declarations which rarely |
518 | /// need them (like static local vars). |
519 | llvm::MapVector<const NamedDecl *, unsigned> MangleNumbers; |
520 | llvm::MapVector<const VarDecl *, unsigned> StaticLocalNumbers; |
521 | |
522 | /// Mapping that stores parameterIndex values for ParmVarDecls when |
523 | /// that value exceeds the bitfield size of ParmVarDeclBits.ParameterIndex. |
524 | using ParameterIndexTable = llvm::DenseMap<const VarDecl *, unsigned>; |
525 | ParameterIndexTable ParamIndices; |
526 | |
527 | ImportDecl *FirstLocalImport = nullptr; |
528 | ImportDecl *LastLocalImport = nullptr; |
529 | |
530 | TranslationUnitDecl *TUDecl; |
531 | mutable ExternCContextDecl *ExternCContext = nullptr; |
532 | mutable BuiltinTemplateDecl *MakeIntegerSeqDecl = nullptr; |
533 | mutable BuiltinTemplateDecl *TypePackElementDecl = nullptr; |
534 | |
535 | /// The associated SourceManager object. |
536 | SourceManager &SourceMgr; |
537 | |
538 | /// The language options used to create the AST associated with |
539 | /// this ASTContext object. |
540 | LangOptions &LangOpts; |
541 | |
542 | /// Blacklist object that is used by sanitizers to decide which |
543 | /// entities should not be instrumented. |
544 | std::unique_ptr<SanitizerBlacklist> SanitizerBL; |
545 | |
546 | /// Function filtering mechanism to determine whether a given function |
547 | /// should be imbued with the XRay "always" or "never" attributes. |
548 | std::unique_ptr<XRayFunctionFilter> XRayFilter; |
549 | |
550 | /// The allocator used to create AST objects. |
551 | /// |
552 | /// AST objects are never destructed; rather, all memory associated with the |
553 | /// AST objects will be released when the ASTContext itself is destroyed. |
554 | mutable llvm::BumpPtrAllocator BumpAlloc; |
555 | |
556 | /// Allocator for partial diagnostics. |
557 | PartialDiagnostic::StorageAllocator DiagAllocator; |
558 | |
559 | /// The current C++ ABI. |
560 | std::unique_ptr<CXXABI> ABI; |
561 | CXXABI *createCXXABI(const TargetInfo &T); |
562 | |
563 | /// The logical -> physical address space map. |
564 | const LangASMap *AddrSpaceMap = nullptr; |
565 | |
566 | /// Whether address space map mangling must be used with language-specific
567 | /// address spaces (e.g., OpenCL/CUDA).
568 | bool AddrSpaceMapMangling; |
569 | |
570 | const TargetInfo *Target = nullptr; |
571 | const TargetInfo *AuxTarget = nullptr; |
572 | clang::PrintingPolicy PrintingPolicy; |
573 | std::unique_ptr<interp::Context> InterpContext; |
574 | |
575 | public: |
576 | IdentifierTable &Idents; |
577 | SelectorTable &Selectors; |
578 | Builtin::Context &BuiltinInfo; |
579 | mutable DeclarationNameTable DeclarationNames; |
580 | IntrusiveRefCntPtr<ExternalASTSource> ExternalSource; |
581 | ASTMutationListener *Listener = nullptr; |
582 | |
583 | /// Returns the clang bytecode interpreter context. |
584 | interp::Context &getInterpContext(); |
585 | |
586 | /// Container for either a single DynTypedNode or for an ArrayRef to |
587 | /// DynTypedNode. For use with ParentMap. |
588 | class DynTypedNodeList { |
589 | using DynTypedNode = ast_type_traits::DynTypedNode; |
590 | |
591 | llvm::AlignedCharArrayUnion<ast_type_traits::DynTypedNode, |
592 | ArrayRef<DynTypedNode>> Storage; |
593 | bool IsSingleNode; |
594 | |
595 | public: |
596 | DynTypedNodeList(const DynTypedNode &N) : IsSingleNode(true) { |
597 | new (Storage.buffer) DynTypedNode(N); |
598 | } |
599 | |
600 | DynTypedNodeList(ArrayRef<DynTypedNode> A) : IsSingleNode(false) { |
601 | new (Storage.buffer) ArrayRef<DynTypedNode>(A); |
602 | } |
603 | |
604 | const ast_type_traits::DynTypedNode *begin() const { |
605 | if (!IsSingleNode) |
606 | return reinterpret_cast<const ArrayRef<DynTypedNode> *>(Storage.buffer) |
607 | ->begin(); |
608 | return reinterpret_cast<const DynTypedNode *>(Storage.buffer); |
609 | } |
610 | |
611 | const ast_type_traits::DynTypedNode *end() const { |
612 | if (!IsSingleNode) |
613 | return reinterpret_cast<const ArrayRef<DynTypedNode> *>(Storage.buffer) |
614 | ->end(); |
615 | return reinterpret_cast<const DynTypedNode *>(Storage.buffer) + 1; |
616 | } |
617 | |
618 | size_t size() const { return end() - begin(); } |
619 | bool empty() const { return begin() == end(); } |
620 | |
621 | const DynTypedNode &operator[](size_t N) const { |
622 | assert(N < size() && "Out of bounds!");
623 | return *(begin() + N); |
624 | } |
625 | }; |
626 | |
627 | // A traversal scope limits the parts of the AST visible to certain analyses. |
628 | // RecursiveASTVisitor::TraverseAST will only visit reachable nodes, and |
629 | // getParents() will only observe reachable parent edges. |
630 | // |
631 | // The scope is defined by a set of "top-level" declarations. |
632 | // Initially, it is the entire TU: {getTranslationUnitDecl()}. |
633 | // Changing the scope clears the parent cache, which is expensive to rebuild. |
634 | std::vector<Decl *> getTraversalScope() const { return TraversalScope; } |
635 | void setTraversalScope(const std::vector<Decl *> &); |
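
// Example (editor's sketch, not part of the original header): temporarily
// narrowing analyses to a single function, assuming an ASTContext 'Ctx' and
// a FunctionDecl 'FD' obtained elsewhere:
//
//   std::vector<Decl *> OldScope = Ctx.getTraversalScope();
//   Ctx.setTraversalScope({FD});     // visitors and getParents() see only FD
//   // ... run a RecursiveASTVisitor-based analysis ...
//   Ctx.setTraversalScope(OldScope); // restore; parent cache is rebuilt lazily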
636 | |
637 | /// Returns the parents of the given node (within the traversal scope). |
638 | /// |
639 | /// Note that this will lazily compute the parents of all nodes |
640 | /// and store them for later retrieval. Thus, the first call is O(n) |
641 | /// in the number of AST nodes. |
642 | /// |
643 | /// Caveats and FIXMEs: |
644 | /// Calculating the parent map over all AST nodes will need to load the |
645 | /// full AST. This can be undesirable in the case where the full AST is |
646 | /// expensive to create (for example, when using precompiled header |
647 | /// preambles). Thus, there are good opportunities for optimization here. |
648 | /// One idea is to walk the given node downwards, looking for references |
649 | /// to declaration contexts - once a declaration context is found, compute |
650 | /// the parent map for the declaration context; if that can satisfy the |
651 | /// request, loading the whole AST can be avoided. Note that this is made |
652 | /// more complex by statements in templates having multiple parents - those |
653 | /// problems can be solved by building closure over the templated parts of |
654 | /// the AST, which also avoids touching large parts of the AST. |
655 | /// Additionally, we will want to add an interface to already give a hint |
656 | /// where to search for the parents, for example when looking at a statement |
657 | /// inside a certain function. |
658 | /// |
659 | /// 'NodeT' can be one of Decl, Stmt, Type, TypeLoc, |
660 | /// NestedNameSpecifier or NestedNameSpecifierLoc. |
661 | template <typename NodeT> DynTypedNodeList getParents(const NodeT &Node) { |
662 | return getParents(ast_type_traits::DynTypedNode::create(Node)); |
663 | } |
664 | |
665 | DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node); |
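
// Example (editor's sketch, not in the original header): walking one level up
// from a statement, assuming an ASTContext 'Ctx' and a Stmt pointer 'S':
//
//   auto Parents = Ctx.getParents(*S);          // DynTypedNodeList
//   if (!Parents.empty())
//     if (const auto *FD = Parents[0].get<FunctionDecl>())
//       llvm::errs() << FD->getName() << "\n";  // enclosing function, if any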
666 | |
667 | const clang::PrintingPolicy &getPrintingPolicy() const { |
668 | return PrintingPolicy; |
669 | } |
670 | |
671 | void setPrintingPolicy(const clang::PrintingPolicy &Policy) { |
672 | PrintingPolicy = Policy; |
673 | } |
674 | |
675 | SourceManager& getSourceManager() { return SourceMgr; } |
676 | const SourceManager& getSourceManager() const { return SourceMgr; } |
677 | |
678 | llvm::BumpPtrAllocator &getAllocator() const { |
679 | return BumpAlloc; |
680 | } |
681 | |
682 | void *Allocate(size_t Size, unsigned Align = 8) const { |
683 | return BumpAlloc.Allocate(Size, Align); |
684 | } |
685 | template <typename T> T *Allocate(size_t Num = 1) const { |
686 | return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T))); |
687 | } |
688 | void Deallocate(void *Ptr) const {} |
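
// Example (editor's sketch, not in the original header): both allocation
// entry points carve memory out of the context's BumpPtrAllocator, so nothing
// is freed until the ASTContext itself is destroyed. Assumes an ASTContext
// 'Ctx':
//
//   void *Raw = Ctx.Allocate(64, /*Align=*/8);   // 64 raw, 8-byte-aligned bytes
//   QualType *Slots = Ctx.Allocate<QualType>(4); // uninitialized array of 4
//   Ctx.Deallocate(Raw);                         // deliberate no-op (see above)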
689 | |
690 | /// Return the total amount of physical memory allocated for representing |
691 | /// AST nodes and type information. |
692 | size_t getASTAllocatedMemory() const { |
693 | return BumpAlloc.getTotalMemory(); |
694 | } |
695 | |
696 | /// Return the total memory used for various side tables. |
697 | size_t getSideTableAllocatedMemory() const; |
698 | |
699 | PartialDiagnostic::StorageAllocator &getDiagAllocator() { |
700 | return DiagAllocator; |
701 | } |
702 | |
703 | const TargetInfo &getTargetInfo() const { return *Target; } |
704 | const TargetInfo *getAuxTargetInfo() const { return AuxTarget; } |
705 | |
706 | /// getIntTypeForBitwidth -
707 | /// Return the integer QualType matching the specified details:
708 | /// bit width and signedness.
709 | /// Returns an empty type if the target has no appropriate integer type.
710 | QualType getIntTypeForBitwidth(unsigned DestWidth, |
711 | unsigned Signed) const; |
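
// Example (editor's sketch, not in the original header): asking the target
// for a 32-bit signed integer type, assuming an ASTContext 'Ctx':
//
//   QualType I32 = Ctx.getIntTypeForBitwidth(32, /*Signed=*/1);
//   assert(!I32.isNull() && "target has no 32-bit integer type");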
712 | |
713 | /// getRealTypeForBitwidth -
714 | /// Return the floating-point QualType with the specified bit width.
715 | /// Returns an empty type if the target has no appropriate floating-point
716 | /// type.
716 | QualType getRealTypeForBitwidth(unsigned DestWidth) const; |
717 | |
718 | bool AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const; |
719 | |
720 | const LangOptions& getLangOpts() const { return LangOpts; } |
721 | |
722 | const SanitizerBlacklist &getSanitizerBlacklist() const { |
723 | return *SanitizerBL; |
724 | } |
725 | |
726 | const XRayFunctionFilter &getXRayFilter() const { |
727 | return *XRayFilter; |
728 | } |
729 | |
730 | DiagnosticsEngine &getDiagnostics() const; |
731 | |
732 | FullSourceLoc getFullLoc(SourceLocation Loc) const { |
733 | return FullSourceLoc(Loc, SourceMgr);
734 | } |
735 | |
736 | /// All comments in this translation unit. |
737 | RawCommentList Comments; |
738 | |
739 | /// True if comments are already loaded from ExternalASTSource. |
740 | mutable bool CommentsLoaded = false; |
741 | |
742 | /// Mapping from declaration to directly attached comment. |
743 | /// |
744 | /// Raw comments are owned by Comments list. This mapping is populated |
745 | /// lazily. |
746 | mutable llvm::DenseMap<const Decl *, const RawComment *> DeclRawComments; |
747 | |
748 | /// Mapping from canonical declaration to the first redeclaration in chain |
749 | /// that has a comment attached. |
750 | /// |
751 | /// Raw comments are owned by Comments list. This mapping is populated |
752 | /// lazily. |
753 | mutable llvm::DenseMap<const Decl *, const Decl *> RedeclChainComments; |
754 | |
755 | /// Keeps track of redeclaration chains that don't have any comment attached. |
756 | /// Mapping from canonical declaration to redeclaration chain that has no |
757 | /// comments attached to any redeclaration. Specifically, it maps to
758 | /// the last redeclaration we've checked.
759 | /// |
760 | /// Shall not contain declarations that have comments attached to any |
761 | /// redeclaration in their chain. |
762 | mutable llvm::DenseMap<const Decl *, const Decl *> CommentlessRedeclChains; |
763 | |
764 | /// Mapping from declarations to parsed comments attached to any |
765 | /// redeclaration. |
766 | mutable llvm::DenseMap<const Decl *, comments::FullComment *> ParsedComments; |
767 | |
768 | /// Attaches \p Comment to \p OriginalD and to its redeclaration chain |
769 | /// and removes the redeclaration chain from the set of commentless chains. |
770 | /// |
771 | /// Don't do anything if a comment has already been attached to \p OriginalD |
772 | /// or its redeclaration chain. |
773 | void cacheRawCommentForDecl(const Decl &OriginalD, |
774 | const RawComment &Comment) const; |
775 | |
776 | /// Search \p CommentsInFile for a doc comment for \p D.
777 | ///
778 | /// \p RepresentativeLocForDecl is used as the location at which to search
779 | /// for doc comments. \p CommentsInFile is a mapping from offset to comment
780 | /// for the file that contains \p RepresentativeLocForDecl.
781 | RawComment *getRawCommentForDeclNoCacheImpl( |
782 | const Decl *D, const SourceLocation RepresentativeLocForDecl, |
783 | const std::map<unsigned, RawComment *> &CommentsInFile) const; |
784 | |
785 | /// Return the documentation comment attached to a given declaration, |
786 | /// without looking into cache. |
787 | RawComment *getRawCommentForDeclNoCache(const Decl *D) const; |
788 | |
789 | public: |
790 | RawCommentList &getRawCommentList() { |
791 | return Comments; |
792 | } |
793 | |
794 | void addComment(const RawComment &RC) { |
795 | assert(LangOpts.RetainCommentsFromSystemHeaders ||
796 |        !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
797 | Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc); |
798 | } |
799 | |
800 | /// Return the documentation comment attached to a given declaration. |
801 | /// Returns nullptr if no comment is attached. |
802 | /// |
803 | /// \param OriginalDecl if not nullptr, is set to the declaration AST node
804 | /// that had the comment, if the comment we found comes from a redeclaration.
805 | const RawComment * |
806 | getRawCommentForAnyRedecl(const Decl *D, |
807 | const Decl **OriginalDecl = nullptr) const; |
808 | |
809 | /// Searches existing comments for doc comments that should be attached to \p |
810 | /// Decls. If any doc comment is found, it is parsed. |
811 | /// |
812 | /// Requirement: All \p Decls are in the same file. |
813 | /// |
814 | /// If the last comment in the file is already attached we assume |
815 | /// there are no comments left to be attached to \p Decls.
816 | void attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, |
817 | const Preprocessor *PP); |
818 | |
819 | /// Return parsed documentation comment attached to a given declaration. |
820 | /// Returns nullptr if no comment is attached. |
821 | /// |
822 | /// \param PP the Preprocessor used with this TU. Could be nullptr if |
823 | /// preprocessor is not available. |
824 | comments::FullComment *getCommentForDecl(const Decl *D, |
825 | const Preprocessor *PP) const; |
826 | |
827 | /// Return parsed documentation comment attached to a given declaration. |
828 | /// Returns nullptr if no comment is attached. Does not look at any |
829 | /// redeclarations of the declaration. |
830 | comments::FullComment *getLocalCommentForDeclUncached(const Decl *D) const; |
831 | |
832 | comments::FullComment *cloneFullComment(comments::FullComment *FC, |
833 | const Decl *D) const; |
834 | |
835 | private: |
836 | mutable comments::CommandTraits CommentCommandTraits; |
837 | |
838 | /// Iterator that visits import declarations. |
839 | class import_iterator { |
840 | ImportDecl *Import = nullptr; |
841 | |
842 | public: |
843 | using value_type = ImportDecl *; |
844 | using reference = ImportDecl *; |
845 | using pointer = ImportDecl *; |
846 | using difference_type = int; |
847 | using iterator_category = std::forward_iterator_tag; |
848 | |
849 | import_iterator() = default; |
850 | explicit import_iterator(ImportDecl *Import) : Import(Import) {} |
851 | |
852 | reference operator*() const { return Import; } |
853 | pointer operator->() const { return Import; } |
854 | |
855 | import_iterator &operator++() { |
856 | Import = ASTContext::getNextLocalImport(Import); |
857 | return *this; |
858 | } |
859 | |
860 | import_iterator operator++(int) { |
861 | import_iterator Other(*this); |
862 | ++(*this); |
863 | return Other; |
864 | } |
865 | |
866 | friend bool operator==(import_iterator X, import_iterator Y) { |
867 | return X.Import == Y.Import; |
868 | } |
869 | |
870 | friend bool operator!=(import_iterator X, import_iterator Y) { |
871 | return X.Import != Y.Import; |
872 | } |
873 | }; |
874 | |
875 | public: |
876 | comments::CommandTraits &getCommentCommandTraits() const { |
877 | return CommentCommandTraits; |
878 | } |
879 | |
880 | /// Retrieve the attributes for the given declaration. |
881 | AttrVec& getDeclAttrs(const Decl *D); |
882 | |
883 | /// Erase the attributes corresponding to the given declaration. |
884 | void eraseDeclAttrs(const Decl *D); |
885 | |
886 | /// If this variable is an instantiated static data member of a |
887 | /// class template specialization, returns the templated static data member |
888 | /// from which it was instantiated. |
889 | // FIXME: Remove ? |
890 | MemberSpecializationInfo *getInstantiatedFromStaticDataMember( |
891 | const VarDecl *Var); |
892 | |
893 | TemplateOrSpecializationInfo |
894 | getTemplateOrSpecializationInfo(const VarDecl *Var); |
895 | |
896 | /// Note that the static data member \p Inst is an instantiation of |
897 | /// the static data member template \p Tmpl of a class template. |
898 | void setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, |
899 | TemplateSpecializationKind TSK, |
900 | SourceLocation PointOfInstantiation = SourceLocation()); |
901 | |
902 | void setTemplateOrSpecializationInfo(VarDecl *Inst, |
903 | TemplateOrSpecializationInfo TSI); |
904 | |
905 | /// If the given using decl \p Inst is an instantiation of a |
906 | /// (possibly unresolved) using decl from a template instantiation, |
907 | /// return it. |
908 | NamedDecl *getInstantiatedFromUsingDecl(NamedDecl *Inst); |
909 | |
910 | /// Remember that the using decl \p Inst is an instantiation |
911 | /// of the using decl \p Pattern of a class template. |
912 | void setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern); |
913 | |
914 | void setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, |
915 | UsingShadowDecl *Pattern); |
916 | UsingShadowDecl *getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst); |
917 | |
918 | FieldDecl *getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field); |
919 | |
920 | void setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl); |
921 | |
922 | // Access to the set of methods overridden by the given C++ method. |
923 | using overridden_cxx_method_iterator = CXXMethodVector::const_iterator; |
924 | overridden_cxx_method_iterator |
925 | overridden_methods_begin(const CXXMethodDecl *Method) const; |
926 | |
927 | overridden_cxx_method_iterator |
928 | overridden_methods_end(const CXXMethodDecl *Method) const; |
929 | |
930 | unsigned overridden_methods_size(const CXXMethodDecl *Method) const; |
931 | |
932 | using overridden_method_range = |
933 | llvm::iterator_range<overridden_cxx_method_iterator>; |
934 | |
935 | overridden_method_range overridden_methods(const CXXMethodDecl *Method) const; |
936 | |
937 | /// Note that the given C++ \p Method overrides the given \p |
938 | /// Overridden method. |
939 | void addOverriddenMethod(const CXXMethodDecl *Method, |
940 | const CXXMethodDecl *Overridden); |
941 | |
942 | /// Return C++ or ObjC overridden methods for the given \p Method. |
943 | /// |
944 | /// An ObjC method is considered to override any method in the class's |
945 | /// base classes, its protocols, or its categories' protocols, that has |
946 | /// the same selector and is of the same kind (class or instance). |
947 | /// A method in an implementation is not considered to override the same
948 | /// method in the interface or its categories. |
949 | void getOverriddenMethods( |
950 | const NamedDecl *Method, |
951 | SmallVectorImpl<const NamedDecl *> &Overridden) const; |
952 | |
953 | /// Notify the AST context that a new import declaration has been |
954 | /// parsed or implicitly created within this translation unit. |
955 | void addedLocalImportDecl(ImportDecl *Import); |
956 | |
957 | static ImportDecl *getNextLocalImport(ImportDecl *Import) { |
958 | return Import->NextLocalImport; |
959 | } |
960 | |
961 | using import_range = llvm::iterator_range<import_iterator>; |
962 | |
963 | import_range local_imports() const { |
964 | return import_range(import_iterator(FirstLocalImport), import_iterator()); |
965 | } |
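
// Example (editor's sketch, not in the original header): visiting every
// import declaration seen in this translation unit, assuming an ASTContext
// 'Ctx':
//
//   for (ImportDecl *ID : Ctx.local_imports())
//     llvm::errs() << ID->getImportedModule()->getFullModuleName() << "\n";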
966 | |
967 | Decl *getPrimaryMergedDecl(Decl *D) { |
968 | Decl *Result = MergedDecls.lookup(D); |
969 | return Result ? Result : D; |
970 | } |
971 | void setPrimaryMergedDecl(Decl *D, Decl *Primary) { |
972 | MergedDecls[D] = Primary; |
973 | } |
974 | |
975 | /// Note that the definition \p ND has been merged into module \p M, |
976 | /// and should be visible whenever \p M is visible. |
977 | void mergeDefinitionIntoModule(NamedDecl *ND, Module *M, |
978 | bool NotifyListeners = true); |
979 | |
980 | /// Clean up the merged definition list. Call this if you might have |
981 | /// added duplicates into the list. |
982 | void deduplicateMergedDefinitonsFor(NamedDecl *ND); |
983 | |
984 | /// Get the additional modules in which the definition \p Def has |
985 | /// been merged. |
986 | ArrayRef<Module*> getModulesWithMergedDefinition(const NamedDecl *Def) { |
987 | auto MergedIt = |
988 | MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); |
989 | if (MergedIt == MergedDefModules.end()) |
990 | return None; |
991 | return MergedIt->second; |
992 | } |
993 | |
994 | /// Add a declaration to the list of declarations that are initialized |
995 | /// for a module. This will typically be a global variable (with internal |
996 | /// linkage) that runs module initializers, such as the iostream initializer, |
997 | /// or an ImportDecl nominating another module that has initializers. |
998 | void addModuleInitializer(Module *M, Decl *Init); |
999 | |
1000 | void addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs); |
1001 | |
1002 | /// Get the initializations to perform when importing a module, if any. |
1003 | ArrayRef<Decl*> getModuleInitializers(Module *M); |
1004 | |
1005 | TranslationUnitDecl *getTranslationUnitDecl() const { return TUDecl; } |
1006 | |
1007 | ExternCContextDecl *getExternCContextDecl() const; |
1008 | BuiltinTemplateDecl *getMakeIntegerSeqDecl() const; |
1009 | BuiltinTemplateDecl *getTypePackElementDecl() const; |
1010 | |
1011 | // Builtin Types. |
1012 | CanQualType VoidTy; |
1013 | CanQualType BoolTy; |
1014 | CanQualType CharTy; |
1015 | CanQualType WCharTy; // [C++ 3.9.1p5]. |
1016 | CanQualType WideCharTy; // Same as WCharTy in C++, integer type in C99. |
1017 | CanQualType WIntTy; // [C99 7.24.1], integer type unchanged by default promotions. |
1018 | CanQualType Char8Ty; // [C++20 proposal] |
1019 | CanQualType Char16Ty; // [C++0x 3.9.1p5], integer type in C99. |
1020 | CanQualType Char32Ty; // [C++0x 3.9.1p5], integer type in C99. |
1021 | CanQualType SignedCharTy, ShortTy, IntTy, LongTy, LongLongTy, Int128Ty; |
1022 | CanQualType UnsignedCharTy, UnsignedShortTy, UnsignedIntTy, UnsignedLongTy; |
1023 | CanQualType UnsignedLongLongTy, UnsignedInt128Ty; |
1024 | CanQualType FloatTy, DoubleTy, LongDoubleTy, Float128Ty; |
1025 | CanQualType ShortAccumTy, AccumTy, |
1026 | LongAccumTy; // ISO/IEC JTC1 SC22 WG14 N1169 Extension |
1027 | CanQualType UnsignedShortAccumTy, UnsignedAccumTy, UnsignedLongAccumTy; |
1028 | CanQualType ShortFractTy, FractTy, LongFractTy; |
1029 | CanQualType UnsignedShortFractTy, UnsignedFractTy, UnsignedLongFractTy; |
1030 | CanQualType SatShortAccumTy, SatAccumTy, SatLongAccumTy; |
1031 | CanQualType SatUnsignedShortAccumTy, SatUnsignedAccumTy, |
1032 | SatUnsignedLongAccumTy; |
1033 | CanQualType SatShortFractTy, SatFractTy, SatLongFractTy; |
1034 | CanQualType SatUnsignedShortFractTy, SatUnsignedFractTy, |
1035 | SatUnsignedLongFractTy; |
1036 | CanQualType HalfTy; // [OpenCL 6.1.1.1], ARM NEON |
1037 | CanQualType Float16Ty; // C11 extension ISO/IEC TS 18661-3 |
1038 | CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy; |
1039 | CanQualType Float128ComplexTy; |
1040 | CanQualType VoidPtrTy, NullPtrTy; |
1041 | CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy; |
1042 | CanQualType BuiltinFnTy; |
1043 | CanQualType PseudoObjectTy, ARCUnbridgedCastTy; |
1044 | CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy; |
1045 | CanQualType ObjCBuiltinBoolTy; |
1046 | #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ |
1047 | CanQualType SingletonId; |
1048 | #include "clang/Basic/OpenCLImageTypes.def" |
1049 | CanQualType OCLSamplerTy, OCLEventTy, OCLClkEventTy; |
1050 | CanQualType OCLQueueTy, OCLReserveIDTy; |
1051 | CanQualType OMPArraySectionTy; |
1052 | #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ |
1053 | CanQualType Id##Ty; |
1054 | #include "clang/Basic/OpenCLExtensionTypes.def" |
1055 | #define SVE_TYPE(Name, Id, SingletonId) \ |
1056 | CanQualType SingletonId; |
1057 | #include "clang/Basic/AArch64SVEACLETypes.def" |
1058 | |
1059 | // Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand. |
1060 | mutable QualType AutoDeductTy; // Deduction against 'auto'. |
1061 | mutable QualType AutoRRefDeductTy; // Deduction against 'auto &&'. |
1062 | |
1063 | // Decl used to help define __builtin_va_list for some targets. |
1064 | // The decl is built when constructing 'BuiltinVaListDecl'. |
1065 | mutable Decl *VaListTagDecl; |
1066 | |
1067 | ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents, |
1068 | SelectorTable &sels, Builtin::Context &builtins); |
1069 | ASTContext(const ASTContext &) = delete; |
1070 | ASTContext &operator=(const ASTContext &) = delete; |
1071 | ~ASTContext(); |
1072 | |
1073 | /// Attach an external AST source to the AST context. |
1074 | /// |
1075 | /// The external AST source provides the ability to load parts of |
1076 | /// the abstract syntax tree as needed from some external storage, |
1077 | /// e.g., a precompiled header. |
1078 | void setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source); |
1079 | |
1080 | /// Retrieve a pointer to the external AST source associated |
1081 | /// with this AST context, if any. |
1082 | ExternalASTSource *getExternalSource() const { |
1083 | return ExternalSource.get(); |
1084 | } |
1085 | |
1086 | /// Attach an AST mutation listener to the AST context. |
1087 | /// |
1088 | /// The AST mutation listener provides the ability to track modifications to |
1089 | /// the abstract syntax tree entities committed after they were initially |
1090 | /// created. |
1091 | void setASTMutationListener(ASTMutationListener *Listener) { |
1092 | this->Listener = Listener; |
1093 | } |
1094 | |
1095 | /// Retrieve a pointer to the AST mutation listener associated |
1096 | /// with this AST context, if any. |
1097 | ASTMutationListener *getASTMutationListener() const { return Listener; } |
1098 | |
1099 | void PrintStats() const; |
1100 | const SmallVectorImpl<Type *>& getTypes() const { return Types; } |
1101 | |
1102 | BuiltinTemplateDecl *buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, |
1103 | const IdentifierInfo *II) const; |
1104 | |
1105 | /// Create a new implicit TU-level CXXRecordDecl or RecordDecl |
1106 | /// declaration. |
1107 | RecordDecl *buildImplicitRecord(StringRef Name, |
1108 | RecordDecl::TagKind TK = TTK_Struct) const; |
1109 | |
1110 | /// Create a new implicit TU-level typedef declaration. |
1111 | TypedefDecl *buildImplicitTypedef(QualType T, StringRef Name) const; |
1112 | |
1113 | /// Retrieve the declaration for the 128-bit signed integer type. |
1114 | TypedefDecl *getInt128Decl() const; |
1115 | |
1116 | /// Retrieve the declaration for the 128-bit unsigned integer type. |
1117 | TypedefDecl *getUInt128Decl() const; |
1118 | |
1119 | //===--------------------------------------------------------------------===// |
1120 | // Type Constructors |
1121 | //===--------------------------------------------------------------------===// |
1122 | |
1123 | private: |
1124 | /// Return a type with extended qualifiers. |
1125 | QualType getExtQualType(const Type *Base, Qualifiers Quals) const; |
1126 | |
1127 | QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const; |
1128 | |
1129 | QualType getPipeType(QualType T, bool ReadOnly) const; |
1130 | |
1131 | public: |
1132 | /// Return the uniqued reference to the type for an address space |
1133 | /// qualified type with the specified type and address space. |
1134 | /// |
1135 | /// The resulting type has a union of the qualifiers from T and the address |
1136 | /// space. If T already has an address space specifier, it is silently |
1137 | /// replaced. |
1138 | QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const; |
1139 | |
1140 | /// Remove any existing address space from the type and return the type
1141 | /// with all other qualifiers intact.
1142 | ///
1143 | /// The returned type is \p T with all of its prior qualifiers except the
1144 | /// address space.
1145 | QualType removeAddrSpaceQualType(QualType T) const; |
1146 | |
1147 | /// Apply Objective-C protocol qualifiers to the given type. |
1148 | /// \param allowOnPointerType specifies if we can apply protocol |
1149 | /// qualifiers on ObjCObjectPointerType. It can be set to true when |
1150 | /// constructing the canonical type of an Objective-C type parameter.
1151 | QualType applyObjCProtocolQualifiers(QualType type, |
1152 | ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, |
1153 | bool allowOnPointerType = false) const; |
1154 | |
1155 | /// Return the uniqued reference to the type for an Objective-C |
1156 | /// gc-qualified type. |
1157 | /// |
1158 | /// The resulting type has a union of the qualifiers from T and the gc |
1159 | /// attribute. |
1160 | QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr) const; |
1161 | |
1162 | /// Return the uniqued reference to the type for a \c restrict |
1163 | /// qualified type. |
1164 | /// |
1165 | /// The resulting type has a union of the qualifiers from \p T and |
1166 | /// \c restrict. |
1167 | QualType getRestrictType(QualType T) const { |
1168 | return T.withFastQualifiers(Qualifiers::Restrict); |
1169 | } |
1170 | |
1171 | /// Return the uniqued reference to the type for a \c volatile |
1172 | /// qualified type. |
1173 | /// |
1174 | /// The resulting type has a union of the qualifiers from \p T and |
1175 | /// \c volatile. |
1176 | QualType getVolatileType(QualType T) const { |
1177 | return T.withFastQualifiers(Qualifiers::Volatile); |
1178 | } |
1179 | |
1180 | /// Return the uniqued reference to the type for a \c const |
1181 | /// qualified type. |
1182 | /// |
1183 | /// The resulting type has a union of the qualifiers from \p T and \c const. |
1184 | /// |
1185 | /// It can be reasonably expected that this will always be equivalent to |
1186 | /// calling T.withConst(). |
1187 | QualType getConstType(QualType T) const { return T.withConst(); } |
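
// Example (editor's sketch, not in the original header): the fast-qualifier
// helpers compose freely, assuming an ASTContext 'Ctx':
//
//   QualType CVInt = Ctx.getVolatileType(Ctx.getConstType(Ctx.IntTy));
//   // equivalent to QualType(Ctx.IntTy).withConst().withVolatile()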
1188 | |
1189 | /// Change the ExtInfo on a function type. |
1190 | const FunctionType *adjustFunctionType(const FunctionType *Fn, |
1191 | FunctionType::ExtInfo EInfo); |
1192 | |
1193 | /// Adjust the given function result type. |
1194 | CanQualType getCanonicalFunctionResultType(QualType ResultType) const; |
1195 | |
1196 | /// Change the result type of a function type once it is deduced. |
1197 | void adjustDeducedFunctionResultType(FunctionDecl *FD, QualType ResultType); |
1198 | |
1199 | /// Get a function type and produce the equivalent function type with the |
1200 | /// specified exception specification. Type sugar that can be present on a |
1201 | /// declaration of a function with an exception specification is permitted |
1202 | /// and preserved. Other type sugar (for instance, typedefs) is not. |
1203 | QualType getFunctionTypeWithExceptionSpec( |
1204 | QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI); |
1205 | |
1206 | /// Determine whether two function types are the same, ignoring |
1207 | /// exception specifications in cases where they're part of the type. |
1208 | bool hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType U); |
1209 | |
1210 | /// Change the exception specification on a function once it is |
1211 | /// delay-parsed, instantiated, or computed. |
1212 | void adjustExceptionSpec(FunctionDecl *FD, |
1213 | const FunctionProtoType::ExceptionSpecInfo &ESI, |
1214 | bool AsWritten = false); |
1215 | |
1216 | /// Return the uniqued reference to the type for a complex |
1217 | /// number with the specified element type. |
1218 | QualType getComplexType(QualType T) const; |
1219 | CanQualType getComplexType(CanQualType T) const { |
1220 | return CanQualType::CreateUnsafe(getComplexType((QualType) T)); |
1221 | } |
1222 | |
1223 | /// Return the uniqued reference to the type for a pointer to |
1224 | /// the specified type. |
1225 | QualType getPointerType(QualType T) const; |
1226 | CanQualType getPointerType(CanQualType T) const { |
1227 | return CanQualType::CreateUnsafe(getPointerType((QualType) T)); |
1228 | } |
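
// Example (editor's sketch, not in the original header): pointer types are
// uniqued, so building the same type twice yields the same node. Assumes an
// ASTContext 'Ctx':
//
//   QualType P1 = Ctx.getPointerType(Ctx.IntTy); // int *
//   QualType P2 = Ctx.getPointerType(Ctx.IntTy);
//   assert(P1 == P2 && "uniqued: identical Type node both times");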
1229 | |
1230 | /// Return the uniqued reference to a type adjusted from the original |
1231 | /// type to a new type. |
1232 | QualType getAdjustedType(QualType Orig, QualType New) const; |
1233 | CanQualType getAdjustedType(CanQualType Orig, CanQualType New) const { |
1234 | return CanQualType::CreateUnsafe( |
1235 | getAdjustedType((QualType)Orig, (QualType)New)); |
1236 | } |
1237 | |
1238 | /// Return the uniqued reference to the decayed version of the given |
1239 | /// type. Can only be called on array and function types which decay to |
1240 | /// pointer types. |
1241 | QualType getDecayedType(QualType T) const; |
1242 | CanQualType getDecayedType(CanQualType T) const { |
1243 | return CanQualType::CreateUnsafe(getDecayedType((QualType) T)); |
1244 | } |
1245 | |
1246 | /// Return the uniqued reference to the atomic type for the specified |
1247 | /// type. |
1248 | QualType getAtomicType(QualType T) const; |
1249 | |
1250 | /// Return the uniqued reference to the type for a block of the |
1251 | /// specified type. |
1252 | QualType getBlockPointerType(QualType T) const; |
1253 | |
1254 | /// Gets the struct used to keep track of the descriptor for pointers to
1255 | /// blocks.
1256 | QualType getBlockDescriptorType() const; |
1257 | |
1258 | /// Return a read_only pipe type for the specified type. |
1259 | QualType getReadPipeType(QualType T) const; |
1260 | |
1261 | /// Return a write_only pipe type for the specified type. |
1262 | QualType getWritePipeType(QualType T) const; |
1263 | |
1264 | /// Gets the struct used to keep track of the extended descriptor for |
1265 | /// pointers to blocks.
1266 | QualType getBlockDescriptorExtendedType() const; |
1267 | |
1268 | /// Map an AST Type to an OpenCLTypeKind enum value. |
1269 | TargetInfo::OpenCLTypeKind getOpenCLTypeKind(const Type *T) const; |
1270 | |
1271 | /// Get address space for OpenCL type. |
1272 | LangAS getOpenCLTypeAddrSpace(const Type *T) const; |
1273 | |
1274 | void setcudaConfigureCallDecl(FunctionDecl *FD) { |
1275 | cudaConfigureCallDecl = FD; |
1276 | } |
1277 | |
1278 | FunctionDecl *getcudaConfigureCallDecl() { |
1279 | return cudaConfigureCallDecl; |
1280 | } |
1281 | |
1282 | /// Returns true iff we need copy/dispose helpers for the given type. |
1283 | bool BlockRequiresCopying(QualType Ty, const VarDecl *D); |
1284 | |
1285 | /// Returns true if the given type has a known lifetime; in that case,
1286 | /// HasByrefExtendedLayout is set to false. If HasByrefExtendedLayout is set
1287 | /// to true, the byref variable has an extended lifetime.
1288 | bool getByrefLifetime(QualType Ty, |
1289 | Qualifiers::ObjCLifetime &Lifetime, |
1290 | bool &HasByrefExtendedLayout) const; |
1291 | |
1292 | /// Return the uniqued reference to the type for an lvalue reference |
1293 | /// to the specified type. |
1294 | QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true) |
1295 | const; |
1296 | |
1297 | /// Return the uniqued reference to the type for an rvalue reference |
1298 | /// to the specified type. |
1299 | QualType getRValueReferenceType(QualType T) const; |
1300 | |
1301 | /// Return the uniqued reference to the type for a member pointer to |
1302 | /// the specified type in the specified class. |
1303 | /// |
1304 | /// The class \p Cls is a \c Type because it could be a dependent name. |
1305 | QualType getMemberPointerType(QualType T, const Type *Cls) const; |
1306 | |
1307 | /// Return a non-unique reference to the type for a variable array of |
1308 | /// the specified element type. |
1309 | QualType getVariableArrayType(QualType EltTy, Expr *NumElts, |
1310 | ArrayType::ArraySizeModifier ASM, |
1311 | unsigned IndexTypeQuals, |
1312 | SourceRange Brackets) const; |
1313 | |
1314 | /// Return a non-unique reference to the type for a dependently-sized |
1315 | /// array of the specified element type. |
1316 | /// |
1317 | /// FIXME: We will need these to be uniqued, or at least comparable, at some |
1318 | /// point. |
1319 | QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts, |
1320 | ArrayType::ArraySizeModifier ASM, |
1321 | unsigned IndexTypeQuals, |
1322 | SourceRange Brackets) const; |
1323 | |
1324 | /// Return a unique reference to the type for an incomplete array of |
1325 | /// the specified element type. |
1326 | QualType getIncompleteArrayType(QualType EltTy, |
1327 | ArrayType::ArraySizeModifier ASM, |
1328 | unsigned IndexTypeQuals) const; |
1329 | |
1330 | /// Return the unique reference to the type for a constant array of |
1331 | /// the specified element type. |
1332 | QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, |
1333 | ArrayType::ArraySizeModifier ASM, |
1334 | unsigned IndexTypeQuals) const; |
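
// Example (editor's sketch, not in the original header): building the type
// 'int[10]', assuming an ASTContext 'Ctx':
//
//   llvm::APInt Size(Ctx.getTypeSize(Ctx.getSizeType()), 10);
//   QualType IntArr = Ctx.getConstantArrayType(Ctx.IntTy, Size,
//                                              ArrayType::Normal,
//                                              /*IndexTypeQuals=*/0);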
1335 | |
1336 | /// Return a type for a constant array for a string literal of the |
1337 | /// specified element type and length. |
1338 | QualType getStringLiteralArrayType(QualType EltTy, unsigned Length) const; |
1339 | |
1340 | /// Returns a VLA type where known sizes are replaced with [*].
1341 | QualType getVariableArrayDecayedType(QualType Ty) const; |
1342 | |
1343 | /// Return the unique reference to a vector type of the specified |
1344 | /// element type and size. |
1345 | /// |
1346 | /// \pre \p VectorType must be a built-in type. |
1347 | QualType getVectorType(QualType VectorType, unsigned NumElts, |
1348 | VectorType::VectorKind VecKind) const; |
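
// Example (editor's sketch, not in the original header): a GCC-style vector
// of four floats, as produced by __attribute__((vector_size(16))). Assumes an
// ASTContext 'Ctx':
//
//   QualType V4F = Ctx.getVectorType(Ctx.FloatTy, 4,
//                                    VectorType::GenericVector);
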
1349 | /// Return the unique reference to the type for a dependently sized vector of |
1350 | /// the specified element type. |
1351 | QualType getDependentVectorType(QualType VectorType, Expr *SizeExpr, |
1352 | SourceLocation AttrLoc, |
1353 | VectorType::VectorKind VecKind) const; |
1354 | |
1355 | /// Return the unique reference to an extended vector type |
1356 | /// of the specified element type and size. |
1357 | /// |
1358 | /// \pre \p VectorType must be a built-in type. |
1359 | QualType getExtVectorType(QualType VectorType, unsigned NumElts) const; |
1360 | |
1361 | /// Return a non-unique reference to the type for a dependently-sized
1362 | /// vector of the specified element type. |
1363 | /// |
1364 | /// FIXME: We will need these to be uniqued, or at least comparable, at some |
1365 | /// point. |
1366 | QualType getDependentSizedExtVectorType(QualType VectorType, |
1367 | Expr *SizeExpr, |
1368 | SourceLocation AttrLoc) const; |
1369 | |
1370 | QualType getDependentAddressSpaceType(QualType PointeeType, |
1371 | Expr *AddrSpaceExpr, |
1372 | SourceLocation AttrLoc) const; |
1373 | |
1374 | /// Return a K&R style C function type like 'int()'. |
1375 | QualType getFunctionNoProtoType(QualType ResultTy, |
1376 | const FunctionType::ExtInfo &Info) const; |
1377 | |
1378 | QualType getFunctionNoProtoType(QualType ResultTy) const { |
1379 | return getFunctionNoProtoType(ResultTy, FunctionType::ExtInfo()); |
1380 | } |
1381 | |
1382 | /// Return a normal function type with a typed argument list. |
1383 | QualType getFunctionType(QualType ResultTy, ArrayRef<QualType> Args, |
1384 | const FunctionProtoType::ExtProtoInfo &EPI) const { |
1385 | return getFunctionTypeInternal(ResultTy, Args, EPI, false); |
1386 | } |
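
// Example (editor's sketch, not in the original header): building the
// prototype 'int (int, char)' with default ExtProtoInfo, assuming an
// ASTContext 'Ctx':
//
//   QualType Params[] = {Ctx.IntTy, Ctx.CharTy};
//   FunctionProtoType::ExtProtoInfo EPI; // no varargs, no quals, default CC
//   QualType FnTy = Ctx.getFunctionType(Ctx.IntTy, Params, EPI);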
1387 | |
1388 | QualType adjustStringLiteralBaseType(QualType StrLTy) const; |
1389 | |
1390 | private: |
1391 | /// Return a normal function type with a typed argument list. |
1392 | QualType getFunctionTypeInternal(QualType ResultTy, ArrayRef<QualType> Args, |
1393 | const FunctionProtoType::ExtProtoInfo &EPI, |
1394 | bool OnlyWantCanonical) const; |
1395 | |
1396 | public: |
1397 | /// Return the unique reference to the type for the specified type |
1398 | /// declaration. |
1399 | QualType getTypeDeclType(const TypeDecl *Decl, |
1400 | const TypeDecl *PrevDecl = nullptr) const { |
1401 | assert(Decl && "Passed null for Decl param");
1402 | if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); |
1403 | |
1404 | if (PrevDecl) { |
1405 | assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
1406 | Decl->TypeForDecl = PrevDecl->TypeForDecl; |
1407 | return QualType(PrevDecl->TypeForDecl, 0); |
1408 | } |
1409 | |
1410 | return getTypeDeclTypeSlow(Decl); |
1411 | } |
1412 | |
1413 | /// Return the unique reference to the type for the specified |
1414 | /// typedef-name decl. |
1415 | QualType getTypedefType(const TypedefNameDecl *Decl, |
1416 | QualType Canon = QualType()) const; |
1417 | |
1418 | QualType getRecordType(const RecordDecl *Decl) const; |
1419 | |
1420 | QualType getEnumType(const EnumDecl *Decl) const; |
1421 | |
1422 | QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const; |
1423 | |
1424 | QualType getAttributedType(attr::Kind attrKind, |
1425 | QualType modifiedType, |
1426 | QualType equivalentType); |
1427 | |
1428 | QualType getSubstTemplateTypeParmType(const TemplateTypeParmType *Replaced, |
1429 | QualType Replacement) const; |
1430 | QualType getSubstTemplateTypeParmPackType( |
1431 | const TemplateTypeParmType *Replaced, |
1432 | const TemplateArgument &ArgPack); |
1433 | |
1434 | QualType |
1435 | getTemplateTypeParmType(unsigned Depth, unsigned Index, |
1436 | bool ParameterPack, |
1437 | TemplateTypeParmDecl *ParmDecl = nullptr) const; |
1438 | |
1439 | QualType getTemplateSpecializationType(TemplateName T, |
1440 | ArrayRef<TemplateArgument> Args, |
1441 | QualType Canon = QualType()) const; |
1442 | |
1443 | QualType |
1444 | getCanonicalTemplateSpecializationType(TemplateName T, |
1445 | ArrayRef<TemplateArgument> Args) const; |
1446 | |
1447 | QualType getTemplateSpecializationType(TemplateName T, |
1448 | const TemplateArgumentListInfo &Args, |
1449 | QualType Canon = QualType()) const; |
1450 | |
1451 | TypeSourceInfo * |
1452 | getTemplateSpecializationTypeInfo(TemplateName T, SourceLocation TLoc, |
1453 | const TemplateArgumentListInfo &Args, |
1454 | QualType Canon = QualType()) const; |
1455 | |
1456 | QualType getParenType(QualType NamedType) const; |
1457 | |
1458 | QualType getMacroQualifiedType(QualType UnderlyingTy, |
1459 | const IdentifierInfo *MacroII) const; |
1460 | |
1461 | QualType getElaboratedType(ElaboratedTypeKeyword Keyword, |
1462 | NestedNameSpecifier *NNS, QualType NamedType, |
1463 | TagDecl *OwnedTagDecl = nullptr) const; |
1464 | QualType getDependentNameType(ElaboratedTypeKeyword Keyword, |
1465 | NestedNameSpecifier *NNS, |
1466 | const IdentifierInfo *Name, |
1467 | QualType Canon = QualType()) const; |
1468 | |
1469 | QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword, |
1470 | NestedNameSpecifier *NNS, |
1471 | const IdentifierInfo *Name, |
1472 | const TemplateArgumentListInfo &Args) const; |
1473 | QualType getDependentTemplateSpecializationType( |
1474 | ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, |
1475 | const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args) const; |
1476 | |
1477 | TemplateArgument getInjectedTemplateArg(NamedDecl *ParamDecl); |
1478 | |
1479 | /// Get a template argument list with one argument per template parameter |
1480 | /// in a template parameter list, such as for the injected class name of |
1481 | /// a class template. |
1482 | void getInjectedTemplateArgs(const TemplateParameterList *Params, |
1483 | SmallVectorImpl<TemplateArgument> &Args); |
1484 | |
1485 | QualType getPackExpansionType(QualType Pattern, |
1486 | Optional<unsigned> NumExpansions); |
1487 | |
1488 | QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl, |
1489 | ObjCInterfaceDecl *PrevDecl = nullptr) const; |
1490 | |
1491 | /// Legacy interface: cannot provide type arguments or __kindof. |
1492 | QualType getObjCObjectType(QualType Base, |
1493 | ObjCProtocolDecl * const *Protocols, |
1494 | unsigned NumProtocols) const; |
1495 | |
1496 | QualType getObjCObjectType(QualType Base, |
1497 | ArrayRef<QualType> typeArgs, |
1498 | ArrayRef<ObjCProtocolDecl *> protocols, |
1499 | bool isKindOf) const; |
1500 | |
1501 | QualType getObjCTypeParamType(const ObjCTypeParamDecl *Decl, |
1502 | ArrayRef<ObjCProtocolDecl *> protocols, |
1503 | QualType Canonical = QualType()) const; |
1504 | |
1505 | bool ObjCObjectAdoptsQTypeProtocols(QualType QT, ObjCInterfaceDecl *Decl); |
1506 | |
1507 | /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in |
1508 | /// QT's qualified-id protocol list adopt all protocols in IDecl's list |
1509 | /// of protocols. |
1510 | bool QIdProtocolsAdoptObjCObjectProtocols(QualType QT, |
1511 | ObjCInterfaceDecl *IDecl); |
1512 | |
1513 | /// Return a ObjCObjectPointerType type for the given ObjCObjectType. |
1514 | QualType getObjCObjectPointerType(QualType OIT) const; |
1515 | |
1516 | /// GCC extension. |
1517 | QualType getTypeOfExprType(Expr *e) const; |
1518 | QualType getTypeOfType(QualType t) const; |
1519 | |
1520 | /// C++11 decltype. |
1521 | QualType getDecltypeType(Expr *e, QualType UnderlyingType) const; |
1522 | |
1523 | /// Unary type transforms |
1524 | QualType getUnaryTransformType(QualType BaseType, QualType UnderlyingType, |
1525 | UnaryTransformType::UTTKind UKind) const; |
1526 | |
1527 | /// C++11 deduced auto type. |
1528 | QualType getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, |
1529 | bool IsDependent, bool IsPack = false) const; |
1530 | |
1531 | /// C++11 deduction pattern for 'auto' type. |
1532 | QualType getAutoDeductType() const; |
1533 | |
1534 | /// C++11 deduction pattern for 'auto &&' type. |
1535 | QualType getAutoRRefDeductType() const; |
1536 | |
1537 | /// C++17 deduced class template specialization type. |
1538 | QualType getDeducedTemplateSpecializationType(TemplateName Template, |
1539 | QualType DeducedType, |
1540 | bool IsDependent) const; |
1541 | |
1542 | /// Return the unique reference to the type for the specified TagDecl |
1543 | /// (struct/union/class/enum) decl. |
1544 | QualType getTagDeclType(const TagDecl *Decl) const; |
1545 | |
1546 | /// Return the unique type for "size_t" (C99 7.17), defined in |
1547 | /// <stddef.h>. |
1548 | /// |
1549 | /// The sizeof operator requires this (C99 6.5.3.4p4). |
1550 | CanQualType getSizeType() const; |
1551 | |
1552 | /// Return the unique signed counterpart of |
1553 | /// the integer type corresponding to size_t. |
1554 | CanQualType getSignedSizeType() const; |
1555 | |
1556 | /// Return the unique type for "intmax_t" (C99 7.18.1.5), defined in |
1557 | /// <stdint.h>. |
1558 | CanQualType getIntMaxType() const; |
1559 | |
1560 | /// Return the unique type for "uintmax_t" (C99 7.18.1.5), defined in |
1561 | /// <stdint.h>. |
1562 | CanQualType getUIntMaxType() const; |
1563 | |
1564 | /// Return the unique wchar_t type available in C++ (and available as |
1565 | /// __wchar_t as a Microsoft extension). |
1566 | QualType getWCharType() const { return WCharTy; } |
1567 | |
1568 | /// Return the type of wide characters. In C++, this returns the |
1569 | /// unique wchar_t type. In C99, this returns a type compatible with the type |
1570 | /// defined in <stddef.h> as defined by the target. |
1571 | QualType getWideCharType() const { return WideCharTy; } |
1572 | |
1573 | /// Return the type of "signed wchar_t". |
1574 | /// |
1575 | /// Used when in C++, as a GCC extension. |
1576 | QualType getSignedWCharType() const; |
1577 | |
1578 | /// Return the type of "unsigned wchar_t". |
1579 | /// |
1580 | /// Used when in C++, as a GCC extension. |
1581 | QualType getUnsignedWCharType() const; |
1582 | |
1583 | /// In C99, this returns a type compatible with the type |
1584 | /// defined in <stddef.h> as defined by the target. |
1585 | QualType getWIntType() const { return WIntTy; } |
1586 | |
1587 | /// Return a type compatible with "intptr_t" (C99 7.18.1.4), |
1588 | /// as defined by the target. |
1589 | QualType getIntPtrType() const; |
1590 | |
1591 | /// Return a type compatible with "uintptr_t" (C99 7.18.1.4), |
1592 | /// as defined by the target. |
1593 | QualType getUIntPtrType() const; |
1594 | |
1595 | /// Return the unique type for "ptrdiff_t" (C99 7.17) defined in |
1596 | /// <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). |
1597 | QualType getPointerDiffType() const; |
1598 | |
1599 | /// Return the unique unsigned counterpart of "ptrdiff_t" |
1600 | /// integer type. The standard (C11 7.21.6.1p7) refers to this type |
1601 | /// in the definition of the %tu format specifier.
1602 | QualType getUnsignedPointerDiffType() const; |
1603 | |
1604 | /// Return the unique type for "pid_t" defined in |
1605 | /// <sys/types.h>. We need this to compute the correct type for vfork(). |
1606 | QualType getProcessIDType() const; |
1607 | |
1608 | /// Return the C structure type used to represent constant CFStrings. |
1609 | QualType getCFConstantStringType() const; |
1610 | |
1611 | /// Returns the C struct type for objc_super |
1612 | QualType getObjCSuperType() const; |
1613 | void setObjCSuperType(QualType ST) { ObjCSuperType = ST; } |
1614 | |
1615 | /// Get the structure type used to represent CFStrings, or a null type
1616 | /// if it hasn't yet been built.
1617 | QualType getRawCFConstantStringType() const { |
1618 | if (CFConstantStringTypeDecl) |
1619 | return getTypedefType(CFConstantStringTypeDecl); |
1620 | return QualType(); |
1621 | } |
1622 | void setCFConstantStringType(QualType T); |
1623 | TypedefDecl *getCFConstantStringDecl() const; |
1624 | RecordDecl *getCFConstantStringTagDecl() const; |
1625 | |
1626 | // This setter/getter represents the ObjC type for an NSConstantString. |
1627 | void setObjCConstantStringInterface(ObjCInterfaceDecl *Decl); |
1628 | QualType getObjCConstantStringInterface() const { |
1629 | return ObjCConstantStringType; |
1630 | } |
1631 | |
1632 | QualType getObjCNSStringType() const { |
1633 | return ObjCNSStringType; |
1634 | } |
1635 | |
1636 | void setObjCNSStringType(QualType T) { |
1637 | ObjCNSStringType = T; |
1638 | } |
1639 | |
1640 | /// Retrieve the type that \c id has been defined to, which may be |
1641 | /// different from the built-in \c id if \c id has been typedef'd. |
1642 | QualType getObjCIdRedefinitionType() const { |
1643 | if (ObjCIdRedefinitionType.isNull()) |
1644 | return getObjCIdType(); |
1645 | return ObjCIdRedefinitionType; |
1646 | } |
1647 | |
1648 | /// Set the user-written type that redefines \c id. |
1649 | void setObjCIdRedefinitionType(QualType RedefType) { |
1650 | ObjCIdRedefinitionType = RedefType; |
1651 | } |
1652 | |
1653 | /// Retrieve the type that \c Class has been defined to, which may be |
1654 | /// different from the built-in \c Class if \c Class has been typedef'd. |
1655 | QualType getObjCClassRedefinitionType() const { |
1656 | if (ObjCClassRedefinitionType.isNull()) |
1657 | return getObjCClassType(); |
1658 | return ObjCClassRedefinitionType; |
1659 | } |
1660 | |
1661 | /// Set the user-written type that redefines \c Class.
1662 | void setObjCClassRedefinitionType(QualType RedefType) { |
1663 | ObjCClassRedefinitionType = RedefType; |
1664 | } |
1665 | |
1666 | /// Retrieve the type that 'SEL' has been defined to, which may be |
1667 | /// different from the built-in 'SEL' if 'SEL' has been typedef'd. |
1668 | QualType getObjCSelRedefinitionType() const { |
1669 | if (ObjCSelRedefinitionType.isNull()) |
1670 | return getObjCSelType(); |
1671 | return ObjCSelRedefinitionType; |
1672 | } |
1673 | |
1674 | /// Set the user-written type that redefines 'SEL'. |
1675 | void setObjCSelRedefinitionType(QualType RedefType) { |
1676 | ObjCSelRedefinitionType = RedefType; |
1677 | } |
1678 | |
1679 | /// Retrieve the identifier 'NSObject'. |
1680 | IdentifierInfo *getNSObjectName() const { |
1681 | if (!NSObjectName) { |
1682 | NSObjectName = &Idents.get("NSObject"); |
1683 | } |
1684 | |
1685 | return NSObjectName; |
1686 | } |
1687 | |
1688 | /// Retrieve the identifier 'NSCopying'. |
1689 | IdentifierInfo *getNSCopyingName() { |
1690 | if (!NSCopyingName) { |
1691 | NSCopyingName = &Idents.get("NSCopying"); |
1692 | } |
1693 | |
1694 | return NSCopyingName; |
1695 | } |
1696 | |
1697 | CanQualType getNSUIntegerType() const { |
1698 | assert(Target && "Expected target to be initialized");
1699 | const llvm::Triple &T = Target->getTriple(); |
1700 | // Windows is LLP64 rather than LP64 |
1701 | if (T.isOSWindows() && T.isArch64Bit()) |
1702 | return UnsignedLongLongTy; |
1703 | return UnsignedLongTy; |
1704 | } |
1705 | |
1706 | CanQualType getNSIntegerType() const { |
1707 | assert(Target && "Expected target to be initialized");
1708 | const llvm::Triple &T = Target->getTriple(); |
1709 | // Windows is LLP64 rather than LP64 |
1710 | if (T.isOSWindows() && T.isArch64Bit()) |
1711 | return LongLongTy; |
1712 | return LongTy; |
1713 | } |
1714 | |
1715 | /// Retrieve the identifier 'bool'. |
1716 | IdentifierInfo *getBoolName() const { |
1717 | if (!BoolName) |
1718 | BoolName = &Idents.get("bool"); |
1719 | return BoolName; |
1720 | } |
1721 | |
1722 | IdentifierInfo *getMakeIntegerSeqName() const { |
1723 | if (!MakeIntegerSeqName) |
1724 | MakeIntegerSeqName = &Idents.get("__make_integer_seq"); |
1725 | return MakeIntegerSeqName; |
1726 | } |
1727 | |
1728 | IdentifierInfo *getTypePackElementName() const { |
1729 | if (!TypePackElementName) |
1730 | TypePackElementName = &Idents.get("__type_pack_element"); |
1731 | return TypePackElementName; |
1732 | } |
1733 | |
1734 | /// Retrieve the Objective-C "instancetype" type, if already known; |
1735 | /// otherwise, returns a NULL type. |
1736 | QualType getObjCInstanceType() { |
1737 | return getTypeDeclType(getObjCInstanceTypeDecl()); |
1738 | } |
1739 | |
1740 | /// Retrieve the typedef declaration corresponding to the Objective-C |
1741 | /// "instancetype" type. |
1742 | TypedefDecl *getObjCInstanceTypeDecl(); |
1743 | |
1744 | /// Set the type for the C FILE type. |
1745 | void setFILEDecl(TypeDecl *FILEDecl) { this->FILEDecl = FILEDecl; } |
1746 | |
1747 | /// Retrieve the C FILE type. |
1748 | QualType getFILEType() const { |
1749 | if (FILEDecl) |
1750 | return getTypeDeclType(FILEDecl); |
1751 | return QualType(); |
1752 | } |
1753 | |
1754 | /// Set the type for the C jmp_buf type. |
1755 | void setjmp_bufDecl(TypeDecl *jmp_bufDecl) { |
1756 | this->jmp_bufDecl = jmp_bufDecl; |
1757 | } |
1758 | |
1759 | /// Retrieve the C jmp_buf type. |
1760 | QualType getjmp_bufType() const { |
1761 | if (jmp_bufDecl) |
1762 | return getTypeDeclType(jmp_bufDecl); |
1763 | return QualType(); |
1764 | } |
1765 | |
1766 | /// Set the type for the C sigjmp_buf type. |
1767 | void setsigjmp_bufDecl(TypeDecl *sigjmp_bufDecl) { |
1768 | this->sigjmp_bufDecl = sigjmp_bufDecl; |
1769 | } |
1770 | |
1771 | /// Retrieve the C sigjmp_buf type. |
1772 | QualType getsigjmp_bufType() const { |
1773 | if (sigjmp_bufDecl) |
1774 | return getTypeDeclType(sigjmp_bufDecl); |
1775 | return QualType(); |
1776 | } |
1777 | |
1778 | /// Set the type for the C ucontext_t type. |
1779 | void setucontext_tDecl(TypeDecl *ucontext_tDecl) { |
1780 | this->ucontext_tDecl = ucontext_tDecl; |
1781 | } |
1782 | |
1783 | /// Retrieve the C ucontext_t type. |
1784 | QualType getucontext_tType() const { |
1785 | if (ucontext_tDecl) |
1786 | return getTypeDeclType(ucontext_tDecl); |
1787 | return QualType(); |
1788 | } |
1789 | |
1790 | /// The result type of logical operations, '<', '>', '!=', etc. |
1791 | QualType getLogicalOperationType() const { |
1792 | return getLangOpts().CPlusPlus ? BoolTy : IntTy; |
1793 | } |
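| // For example, the expression '1 < 2' has type 'int' when compiling C |
| // (C99 6.5.8p6) but type 'bool' when compiling C++ ([expr.rel]). |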
1794 | |
1795 | /// Emit the Objective-C type encoding for the given type \p T into |
1796 | /// \p S. |
1797 | /// |
1798 | /// If \p Field is specified then record field names are also encoded. |
1799 | void getObjCEncodingForType(QualType T, std::string &S, |
1800 | const FieldDecl *Field=nullptr, |
1801 | QualType *NotEncodedT=nullptr) const; |
1802 | |
1803 | /// Emit the Objective-C property type encoding for the given |
1804 | /// type \p T into \p S. |
1805 | void getObjCEncodingForPropertyType(QualType T, std::string &S) const; |
1806 | |
1807 | void getLegacyIntegralTypeEncoding(QualType &t) const; |
1808 | |
1809 | /// Put the string version of the type qualifiers \p QT into \p S. |
1810 | void getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, |
1811 | std::string &S) const; |
1812 | |
1813 | /// Return the encoded type for the function \p Decl. |
1814 | /// |
1815 | /// This is in the same format as Objective-C method encodings. |
1816 | /// |
1817 | /// An error can occur during encoding (e.g., because one of the parameter |
1818 | /// types is incomplete). |
1819 | std::string getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const; |
1820 | |
1821 | /// Return the encoded type for the method |
1822 | /// declaration \p Decl. |
1823 | std::string getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, |
1824 | bool Extended = false) const; |
1825 | |
1826 | /// Return the encoded type for this block declaration. |
1827 | std::string getObjCEncodingForBlock(const BlockExpr *blockExpr) const; |
1828 | |
1829 | /// getObjCEncodingForPropertyDecl - Return the encoded type for |
1830 | /// this property declaration. If non-NULL, Container must be either |
1831 | /// an ObjCCategoryImplDecl or ObjCImplementationDecl; it should |
1832 | /// only be NULL when getting encodings for protocol properties. |
1833 | std::string getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, |
1834 | const Decl *Container) const; |
1835 | |
1836 | bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, |
1837 | ObjCProtocolDecl *rProto) const; |
1838 | |
1839 | ObjCPropertyImplDecl *getObjCPropertyImplDeclForPropertyDecl( |
1840 | const ObjCPropertyDecl *PD, |
1841 | const Decl *Container) const; |
1842 | |
1843 | /// Return the size of type \p T for Objective-C encoding purpose, |
1844 | /// in characters. |
1845 | CharUnits getObjCEncodingTypeSize(QualType T) const; |
1846 | |
1847 | /// Retrieve the typedef corresponding to the predefined \c id type |
1848 | /// in Objective-C. |
1849 | TypedefDecl *getObjCIdDecl() const; |
1850 | |
1851 | /// Represents the Objective-C \c id type. |
1852 | /// |
1853 | /// This is set up lazily, by Sema. \c id is always a (typedef for a) |
1854 | /// pointer type, a pointer to a struct. |
1855 | QualType getObjCIdType() const { |
1856 | return getTypeDeclType(getObjCIdDecl()); |
1857 | } |
1858 | |
1859 | /// Retrieve the typedef corresponding to the predefined 'SEL' type |
1860 | /// in Objective-C. |
1861 | TypedefDecl *getObjCSelDecl() const; |
1862 | |
1863 | /// Retrieve the type that corresponds to the predefined Objective-C |
1864 | /// 'SEL' type. |
1865 | QualType getObjCSelType() const { |
1866 | return getTypeDeclType(getObjCSelDecl()); |
1867 | } |
1868 | |
1869 | /// Retrieve the typedef declaration corresponding to the predefined |
1870 | /// Objective-C 'Class' type. |
1871 | TypedefDecl *getObjCClassDecl() const; |
1872 | |
1873 | /// Represents the Objective-C \c Class type. |
1874 | /// |
1875 | /// This is set up lazily, by Sema. \c Class is always a (typedef for a) |
1876 | /// pointer type, a pointer to a struct. |
1877 | QualType getObjCClassType() const { |
1878 | return getTypeDeclType(getObjCClassDecl()); |
1879 | } |
1880 | |
1881 | /// Retrieve the Objective-C class declaration corresponding to |
1882 | /// the predefined \c Protocol class. |
1883 | ObjCInterfaceDecl *getObjCProtocolDecl() const; |
1884 | |
1885 | /// Retrieve the declaration of the 'BOOL' typedef. |
1886 | TypedefDecl *getBOOLDecl() const { |
1887 | return BOOLDecl; |
1888 | } |
1889 | |
1890 | /// Save the declaration of the 'BOOL' typedef. |
1891 | void setBOOLDecl(TypedefDecl *TD) { |
1892 | BOOLDecl = TD; |
1893 | } |
1894 | |
1895 | /// Retrieve the type of the 'BOOL' typedef. |
1896 | QualType getBOOLType() const { |
1897 | return getTypeDeclType(getBOOLDecl()); |
1898 | } |
1899 | |
1900 | /// Retrieve the type of the Objective-C \c Protocol class. |
1901 | QualType getObjCProtoType() const { |
1902 | return getObjCInterfaceType(getObjCProtocolDecl()); |
1903 | } |
1904 | |
1905 | /// Retrieve the C type declaration corresponding to the predefined |
1906 | /// \c __builtin_va_list type. |
1907 | TypedefDecl *getBuiltinVaListDecl() const; |
1908 | |
1909 | /// Retrieve the type of the \c __builtin_va_list type. |
1910 | QualType getBuiltinVaListType() const { |
1911 | return getTypeDeclType(getBuiltinVaListDecl()); |
1912 | } |
1913 | |
1914 | /// Retrieve the C type declaration corresponding to the predefined |
1915 | /// \c __va_list_tag type used to help define the \c __builtin_va_list type |
1916 | /// for some targets. |
1917 | Decl *getVaListTagDecl() const; |
1918 | |
1919 | /// Retrieve the C type declaration corresponding to the predefined |
1920 | /// \c __builtin_ms_va_list type. |
1921 | TypedefDecl *getBuiltinMSVaListDecl() const; |
1922 | |
1923 | /// Retrieve the type of the \c __builtin_ms_va_list type. |
1924 | QualType getBuiltinMSVaListType() const { |
1925 | return getTypeDeclType(getBuiltinMSVaListDecl()); |
1926 | } |
1927 | |
1928 | /// Return whether a declaration to a builtin is allowed to be |
1929 | /// overloaded/redeclared. |
1930 | bool canBuiltinBeRedeclared(const FunctionDecl *) const; |
1931 | |
1932 | /// Return a type with additional \c const, \c volatile, or |
1933 | /// \c restrict qualifiers. |
1934 | QualType getCVRQualifiedType(QualType T, unsigned CVR) const { |
1935 | return getQualifiedType(T, Qualifiers::fromCVRMask(CVR)); |
1936 | } |
1937 | |
1938 | /// Un-split a SplitQualType. |
1939 | QualType getQualifiedType(SplitQualType split) const { |
1940 | return getQualifiedType(split.Ty, split.Quals); |
1941 | } |
1942 | |
1943 | /// Return a type with additional qualifiers. |
1944 | QualType getQualifiedType(QualType T, Qualifiers Qs) const { |
1945 | if (!Qs.hasNonFastQualifiers()) |
1946 | return T.withFastQualifiers(Qs.getFastQualifiers()); |
1947 | QualifierCollector Qc(Qs); |
1948 | const Type *Ptr = Qc.strip(T); |
1949 | return getExtQualType(Ptr, Qc); |
1950 | } |
1951 | |
1952 | /// Return a type with additional qualifiers. |
1953 | QualType getQualifiedType(const Type *T, Qualifiers Qs) const { |
1954 | if (!Qs.hasNonFastQualifiers()) |
1955 | return QualType(T, Qs.getFastQualifiers()); |
1956 | return getExtQualType(T, Qs); |
1957 | } |
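| // Note: const, volatile, and restrict are the "fast" qualifiers, stored in |
| // the low bits of the QualType pointer; adding only those requires no |
| // allocation, while any other qualifier goes through getExtQualType, which |
| // may allocate an ExtQuals node. |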
1958 | |
1959 | /// Return a type with the given lifetime qualifier. |
1960 | /// |
1961 | /// \pre \p type must have no lifetime qualifier, and \p lifetime must not be \c OCL_None. |
1962 | QualType getLifetimeQualifiedType(QualType type, |
1963 | Qualifiers::ObjCLifetime lifetime) { |
1964 | assert(type.getObjCLifetime() == Qualifiers::OCL_None); |
1965 | assert(lifetime != Qualifiers::OCL_None); |
1966 | |
1967 | Qualifiers qs; |
1968 | qs.addObjCLifetime(lifetime); |
1969 | return getQualifiedType(type, qs); |
1970 | } |
1971 | |
1972 | /// getUnqualifiedObjCPointerType - Returns version of |
1973 | /// Objective-C pointer type with lifetime qualifier removed. |
1974 | QualType getUnqualifiedObjCPointerType(QualType type) const { |
1975 | if (!type.getTypePtr()->isObjCObjectPointerType() || |
1976 | !type.getQualifiers().hasObjCLifetime()) |
1977 | return type; |
1978 | Qualifiers Qs = type.getQualifiers(); |
1979 | Qs.removeObjCLifetime(); |
1980 | return getQualifiedType(type.getUnqualifiedType(), Qs); |
1981 | } |
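| // Hypothetical usage sketch: given a type written '__strong id', this |
| // returns 'id' with the lifetime qualifier stripped while any other |
| // qualifiers (e.g. 'volatile') are preserved: |
| //   QualType Stripped = Ctx.getUnqualifiedObjCPointerType(StrongIdTy); |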
1982 | |
1983 | unsigned char getFixedPointScale(QualType Ty) const; |
1984 | unsigned char getFixedPointIBits(QualType Ty) const; |
1985 | FixedPointSemantics getFixedPointSemantics(QualType Ty) const; |
1986 | APFixedPoint getFixedPointMax(QualType Ty) const; |
1987 | APFixedPoint getFixedPointMin(QualType Ty) const; |
1988 | |
1989 | DeclarationNameInfo getNameForTemplate(TemplateName Name, |
1990 | SourceLocation NameLoc) const; |
1991 | |
1992 | TemplateName getOverloadedTemplateName(UnresolvedSetIterator Begin, |
1993 | UnresolvedSetIterator End) const; |
1994 | TemplateName getAssumedTemplateName(DeclarationName Name) const; |
1995 | |
1996 | TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS, |
1997 | bool TemplateKeyword, |
1998 | TemplateDecl *Template) const; |
1999 | |
2000 | TemplateName getDependentTemplateName(NestedNameSpecifier *NNS, |
2001 | const IdentifierInfo *Name) const; |
2002 | TemplateName getDependentTemplateName(NestedNameSpecifier *NNS, |
2003 | OverloadedOperatorKind Operator) const; |
2004 | TemplateName getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, |
2005 | TemplateName replacement) const; |
2006 | TemplateName getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, |
2007 | const TemplateArgument &ArgPack) const; |
2008 | |
2009 | enum GetBuiltinTypeError { |
2010 | /// No error |
2011 | GE_None, |
2012 | |
2013 | /// Missing a type |
2014 | GE_Missing_type, |
2015 | |
2016 | /// Missing a type from <stdio.h> |
2017 | GE_Missing_stdio, |
2018 | |
2019 | /// Missing a type from <setjmp.h> |
2020 | GE_Missing_setjmp, |
2021 | |
2022 | /// Missing a type from <ucontext.h> |
2023 | GE_Missing_ucontext |
2024 | }; |
2025 | |
2026 | /// Return the type for the specified builtin. |
2027 | /// |
2028 | /// If \p IntegerConstantArgs is non-null, it is filled in with a bitmask of |
2029 | /// arguments to the builtin that are required to be integer constant |
2030 | /// expressions. |
2031 | QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, |
2032 | unsigned *IntegerConstantArgs = nullptr) const; |
2033 | |
2034 | /// Types and expressions required to build C++2a three-way comparisons |
2035 | /// using operator<=>, including the values returned by builtin <=> operators. |
2036 | ComparisonCategories CompCategories; |
2037 | |
2038 | private: |
2039 | CanQualType getFromTargetType(unsigned Type) const; |
2040 | TypeInfo getTypeInfoImpl(const Type *T) const; |
2041 | |
2042 | //===--------------------------------------------------------------------===// |
2043 | // Type Predicates. |
2044 | //===--------------------------------------------------------------------===// |
2045 | |
2046 | public: |
2047 | /// Return one of the GCNone, Weak or Strong Objective-C garbage |
2048 | /// collection attributes. |
2049 | Qualifiers::GC getObjCGCAttrKind(QualType Ty) const; |
2050 | |
2051 | /// Return true if the given vector types are of the same unqualified |
2052 | /// type or if they are equivalent to the same GCC vector type. |
2053 | /// |
2054 | /// \note This ignores whether they are target-specific (AltiVec or Neon) |
2055 | /// types. |
2056 | bool areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec); |
2057 | |
2058 | /// Return true if the type has been explicitly qualified with ObjC ownership. |
2059 | /// A type may be implicitly qualified with ownership under ObjC ARC, and in |
2060 | /// some cases the compiler treats these differently. |
2061 | bool hasDirectOwnershipQualifier(QualType Ty) const; |
2062 | |
2063 | /// Return true if this is an \c NSObject object with its \c NSObject |
2064 | /// attribute set. |
2065 | static bool isObjCNSObjectType(QualType Ty) { |
2066 | return Ty->isObjCNSObjectType(); |
2067 | } |
2068 | |
2069 | //===--------------------------------------------------------------------===// |
2070 | // Type Sizing and Analysis |
2071 | //===--------------------------------------------------------------------===// |
2072 | |
2073 | /// Return the APFloat 'semantics' for the specified scalar floating |
2074 | /// point type. |
2075 | const llvm::fltSemantics &getFloatTypeSemantics(QualType T) const; |
2076 | |
2077 | /// Get the size and alignment of the specified complete type in bits. |
2078 | TypeInfo getTypeInfo(const Type *T) const; |
2079 | TypeInfo getTypeInfo(QualType T) const { return getTypeInfo(T.getTypePtr()); } |
2080 | |
2081 | /// Get default simd alignment of the specified complete type in bits. |
2082 | unsigned getOpenMPDefaultSimdAlign(QualType T) const; |
2083 | |
2084 | /// Return the size of the specified (complete) type \p T, in bits. |
2085 | uint64_t getTypeSize(QualType T) const { return getTypeInfo(T).Width; } |
2086 | uint64_t getTypeSize(const Type *T) const { return getTypeInfo(T).Width; } |
2087 | |
2088 | /// Return the size of the character type, in bits. |
2089 | uint64_t getCharWidth() const { |
2090 | return getTypeSize(CharTy); |
2091 | } |
2092 | |
2093 | /// Convert a size in bits to a size in characters. |
2094 | CharUnits toCharUnitsFromBits(int64_t BitSize) const; |
2095 | |
2096 | /// Convert a size in characters to a size in bits. |
2097 | int64_t toBits(CharUnits CharSize) const; |
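| // A minimal sketch of the round trip, assuming an 8-bit 'char' target: |
| //   CharUnits Four = Ctx.toCharUnitsFromBits(32); // 32 bits -> 4 chars |
| //   int64_t Bits = Ctx.toBits(Four);              // 4 chars -> 32 bits |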
2098 | |
2099 | /// Return the size of the specified (complete) type \p T, in |
2100 | /// characters. |
2101 | CharUnits getTypeSizeInChars(QualType T) const; |
2102 | CharUnits getTypeSizeInChars(const Type *T) const; |
2103 | |
2104 | Optional<CharUnits> getTypeSizeInCharsIfKnown(QualType Ty) const { |
2105 | if (Ty->isIncompleteType() || Ty->isDependentType()) |
2106 | return None; |
2107 | return getTypeSizeInChars(Ty); |
2108 | } |
2109 | |
2110 | Optional<CharUnits> getTypeSizeInCharsIfKnown(const Type *Ty) const { |
2111 | return getTypeSizeInCharsIfKnown(QualType(Ty, 0)); |
2112 | } |
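| // Hypothetical usage sketch: incomplete and dependent types have no known |
| // size, so callers must test the Optional before dereferencing it: |
| //   if (Optional<CharUnits> Sz = Ctx.getTypeSizeInCharsIfKnown(Ty)) |
| //     Total += Sz->getQuantity(); |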
2113 | |
2114 | /// Return the ABI-specified alignment of a (complete) type \p T, in |
2115 | /// bits. |
2116 | unsigned getTypeAlign(QualType T) const { return getTypeInfo(T).Align; } |
2117 | unsigned getTypeAlign(const Type *T) const { return getTypeInfo(T).Align; } |
2118 | |
2119 | /// Return the ABI-specified natural alignment of a (complete) type \p T, |
2120 | /// before alignment adjustments, in bits. |
2121 | /// |
2122 | /// This alignment is currently used only by ARM and AArch64 when passing |
2123 | /// arguments of a composite type. |
2124 | unsigned getTypeUnadjustedAlign(QualType T) const { |
2125 | return getTypeUnadjustedAlign(T.getTypePtr()); |
2126 | } |
2127 | unsigned getTypeUnadjustedAlign(const Type *T) const; |
2128 | |
2129 | /// Return the ABI-specified alignment of a type, in bits, or 0 if |
2130 | /// the type is incomplete and we cannot determine the alignment (for |
2131 | /// example, from alignment attributes). |
2132 | unsigned getTypeAlignIfKnown(QualType T) const; |
2133 | |
2134 | /// Return the ABI-specified alignment of a (complete) type \p T, in |
2135 | /// characters. |
2136 | CharUnits getTypeAlignInChars(QualType T) const; |
2137 | CharUnits getTypeAlignInChars(const Type *T) const; |
2138 | |
2139 | /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, |
2140 | /// in characters, before alignment adjustments. This method does not work on |
2141 | /// incomplete types. |
2142 | CharUnits getTypeUnadjustedAlignInChars(QualType T) const; |
2143 | CharUnits getTypeUnadjustedAlignInChars(const Type *T) const; |
2144 | |
2145 | // getTypeInfoDataSizeInChars - Return the size and alignment of a type, in |
2146 | // chars. If the type is a record, its data size is returned. |
2147 | std::pair<CharUnits, CharUnits> getTypeInfoDataSizeInChars(QualType T) const; |
2148 | |
2149 | std::pair<CharUnits, CharUnits> getTypeInfoInChars(const Type *T) const; |
2150 | std::pair<CharUnits, CharUnits> getTypeInfoInChars(QualType T) const; |
2151 | |
2152 | /// Determine if the alignment the type has was required using an |
2153 | /// alignment attribute. |
2154 | bool isAlignmentRequired(const Type *T) const; |
2155 | bool isAlignmentRequired(QualType T) const; |
2156 | |
2157 | /// Return the "preferred" alignment of the specified type \p T for |
2158 | /// the current target, in bits. |
2159 | /// |
2160 | /// This can be different than the ABI alignment in cases where it is |
2161 | /// beneficial for performance to overalign a data type. |
2162 | unsigned getPreferredTypeAlign(const Type *T) const; |
2163 | |
2164 | /// Return the default alignment for __attribute__((aligned)) on |
2165 | /// this target, to be used if no alignment value is specified. |
2166 | unsigned getTargetDefaultAlignForAttributeAligned() const; |
2167 | |
2168 | /// Return the alignment in bits that should be given to a |
2169 | /// global variable with type \p T. |
2170 | unsigned getAlignOfGlobalVar(QualType T) const; |
2171 | |
2172 | /// Return the alignment in characters that should be given to a |
2173 | /// global variable with type \p T. |
2174 | CharUnits getAlignOfGlobalVarInChars(QualType T) const; |
2175 | |
2176 | /// Return a conservative estimate of the alignment of the specified |
2177 | /// decl \p D. |
2178 | /// |
2179 | /// \pre \p D must not be a bit-field declaration, as bit-fields do not have a valid |
2180 | /// alignment. |
2181 | /// |
2182 | /// If \p ForAlignof, references are treated like their underlying type |
2183 | /// and large arrays don't get any special treatment. If not \p ForAlignof |
2184 | /// it computes the value expected by CodeGen: references are treated like |
2185 | /// pointers and large arrays get extra alignment. |
2186 | CharUnits getDeclAlign(const Decl *D, bool ForAlignof = false) const; |
2187 | |
2188 | /// Return the alignment (in bytes) of the thrown exception object. This is |
2189 | /// only meaningful for targets that allocate C++ exceptions in a system |
2190 | /// runtime, such as those using the Itanium C++ ABI. |
2191 | CharUnits getExnObjectAlignment() const { |
2192 | return toCharUnitsFromBits(Target->getExnObjectAlignment()); |
2193 | } |
2194 | |
2195 | /// Get or compute information about the layout of the specified |
2196 | /// record (struct/union/class) \p D, which indicates its size and field |
2197 | /// position information. |
2198 | const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const; |
2199 | |
2200 | /// Get or compute information about the layout of the specified |
2201 | /// Objective-C interface. |
2202 | const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) |
2203 | const; |
2204 | |
2205 | void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS, |
2206 | bool Simple = false) const; |
2207 | |
2208 | /// Get or compute information about the layout of the specified |
2209 | /// Objective-C implementation. |
2210 | /// |
2211 | /// This may differ from the interface if synthesized ivars are present. |
2212 | const ASTRecordLayout & |
2213 | getASTObjCImplementationLayout(const ObjCImplementationDecl *D) const; |
2214 | |
2215 | /// Get our current best idea for the key function of the |
2216 | /// given record decl, or nullptr if there isn't one. |
2217 | /// |
2218 | /// The key function is, according to the Itanium C++ ABI section 5.2.3: |
2219 | /// ...the first non-pure virtual function that is not inline at the |
2220 | /// point of class definition. |
2221 | /// |
2222 | /// Other ABIs use the same idea. However, the ARM C++ ABI ignores |
2223 | /// virtual functions that are defined 'inline', which means that |
2224 | /// the result of this computation can change. |
2225 | const CXXMethodDecl *getCurrentKeyFunction(const CXXRecordDecl *RD); |
2226 | |
2227 | /// Observe that the given method cannot be a key function. |
2228 | /// Checks the key-function cache for the method's class and clears it |
2229 | /// if it matches the given declaration. |
2230 | /// |
2231 | /// This is used in ABIs where out-of-line definitions marked |
2232 | /// inline are not considered to be key functions. |
2233 | /// |
2234 | /// \param method should be the declaration from the class definition |
2235 | void setNonKeyFunction(const CXXMethodDecl *method); |
2236 | |
2237 | /// Loading virtual member pointers using the virtual inheritance model |
2238 | /// always results in an adjustment using the vbtable even if the index is |
2239 | /// zero. |
2240 | /// |
2241 | /// This is usually OK because the first slot in the vbtable points |
2242 | /// backwards to the top of the MDC (the most-derived class). However, the MDC |
2243 | /// might be reusing a vbptr from an nv-base (non-virtual base). In this case, |
2244 | /// the first slot in the vbtable points to the start of the nv-base which |
2245 | /// introduced the vbptr and *not* the MDC. Modify the NonVirtualBaseAdjustment to account for this. |
2246 | CharUnits getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const; |
2247 | |
2248 | /// Get the offset of a FieldDecl or IndirectFieldDecl, in bits. |
2249 | uint64_t getFieldOffset(const ValueDecl *FD) const; |
2250 | |
2251 | /// Get the offset of an ObjCIvarDecl in bits. |
2252 | uint64_t lookupFieldBitOffset(const ObjCInterfaceDecl *OID, |
2253 | const ObjCImplementationDecl *ID, |
2254 | const ObjCIvarDecl *Ivar) const; |
2255 | |
2256 | bool isNearlyEmpty(const CXXRecordDecl *RD) const; |
2257 | |
2258 | VTableContextBase *getVTableContext(); |
2259 | |
2260 | /// If \p T is a null pointer, use the target associated with this ASTContext. |
2261 | MangleContext *createMangleContext(const TargetInfo *T = nullptr); |
2262 | |
2263 | void DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass, |
2264 | SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const; |
2265 | |
2266 | unsigned CountNonClassIvars(const ObjCInterfaceDecl *OI) const; |
2267 | void CollectInheritedProtocols(const Decl *CDecl, |
2268 | llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols); |
2269 | |
2270 | /// Return true if the specified type has unique object representations |
2271 | /// according to (C++17 [meta.unary.prop]p9) |
2272 | bool hasUniqueObjectRepresentations(QualType Ty) const; |
2273 | |
2274 | //===--------------------------------------------------------------------===// |
2275 | // Type Operators |
2276 | //===--------------------------------------------------------------------===// |
2277 | |
2278 | /// Return the canonical (structural) type corresponding to the |
2279 | /// specified potentially non-canonical type \p T. |
2280 | /// |
2281 | /// A non-canonical type may have many "decorated" spellings. Decorators |
2282 | /// can include typedefs, 'typeof' operators, etc. The |
2283 | /// returned type is guaranteed to be free of any of these, allowing two |
2284 | /// canonical types to be compared for exact equality with a simple pointer |
2285 | /// comparison. |
2286 | CanQualType getCanonicalType(QualType T) const { |
2287 | return CanQualType::CreateUnsafe(T.getCanonicalType()); |
2288 | } |
2289 | |
2290 | const Type *getCanonicalType(const Type *T) const { |
2291 | return T->getCanonicalTypeInternal().getTypePtr(); |
2292 | } |
2293 | |
2294 | /// Return the canonical parameter type corresponding to the specific |
2295 | /// potentially non-canonical one. |
2296 | /// |
2297 | /// Qualifiers are stripped off, functions are turned into function |
2298 | /// pointers, and arrays decay one level into pointers. |
2299 | CanQualType getCanonicalParamType(QualType T) const; |
2300 | |
2301 | /// Determine whether the given types \p T1 and \p T2 are equivalent. |
2302 | bool hasSameType(QualType T1, QualType T2) const { |
2303 | return getCanonicalType(T1) == getCanonicalType(T2); |
2304 | } |
2305 | bool hasSameType(const Type *T1, const Type *T2) const { |
2306 | return getCanonicalType(T1) == getCanonicalType(T2); |
2307 | } |
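| // For example, given 'typedef int MyInt;', hasSameType(MyIntTy, IntTy) is |
| // true: both canonicalize to the same 'int' node, making the comparison a |
| // cheap pointer-equality check. |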
2308 | |
2309 | /// Return this type as a completely-unqualified array type, |
2310 | /// capturing the qualifiers in \p Quals. |
2311 | /// |
2312 | /// This will remove the minimal amount of sugaring from the types, similar |
2313 | /// to the behavior of QualType::getUnqualifiedType(). |
2314 | /// |
2315 | /// \param T is the qualified type, which may be an ArrayType |
2316 | /// |
2317 | /// \param Quals will receive the full set of qualifiers that were |
2318 | /// applied to the array. |
2319 | /// |
2320 | /// \returns if this is an array type, the completely unqualified array type |
2321 | /// that corresponds to it. Otherwise, returns T.getUnqualifiedType(). |
2322 | QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals); |
2323 | |
2324 | /// Determine whether the given types are equivalent after |
2325 | /// cvr-qualifiers have been removed. |
2326 | bool hasSameUnqualifiedType(QualType T1, QualType T2) const { |
2327 | return getCanonicalType(T1).getTypePtr() == |
2328 | getCanonicalType(T2).getTypePtr(); |
2329 | } |
2330 | |
2331 | bool hasSameNullabilityTypeQualifier(QualType SubT, QualType SuperT, |
2332 | bool IsParam) const { |
2333 | auto SubTnullability = SubT->getNullability(*this); |
2334 | auto SuperTnullability = SuperT->getNullability(*this); |
2335 | if (SubTnullability.hasValue() == SuperTnullability.hasValue()) { |
2336 | // If neither has nullability, they are compatible. |
2337 | if (!SubTnullability) |
2338 | return true; |
2339 | // Both have nullability qualifier. |
2340 | if (*SubTnullability == *SuperTnullability || |
2341 | *SubTnullability == NullabilityKind::Unspecified || |
2342 | *SuperTnullability == NullabilityKind::Unspecified) |
2343 | return true; |
2344 | |
2345 | if (IsParam) { |
2346 | // Ok for the superclass method parameter to be "nonnull" and the subclass |
2347 | // method parameter to be "nullable" |
2348 | return (*SuperTnullability == NullabilityKind::NonNull && |
2349 | *SubTnullability == NullabilityKind::Nullable); |
2350 | } else { |
2352 | // For the return type, it's okay for the superclass method to specify |
2353 | // "nullable" and the subclass method specify "nonnull" |
2354 | return (*SuperTnullability == NullabilityKind::Nullable && |
2355 | *SubTnullability == NullabilityKind::NonNull); |
2356 | } |
2357 | } |
2358 | return true; |
2359 | } |
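| // Illustrative sketch of the variance rules above: |
| //   parameter: superclass 'id _Nonnull' vs. subclass 'id _Nullable' |
| //     -> compatible (a parameter may be loosened in the subclass); |
| //   return:    superclass 'id _Nullable' vs. subclass 'id _Nonnull' |
| //     -> compatible (a return type may be tightened in the subclass). |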
2360 | |
2361 | bool ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, |
2362 | const ObjCMethodDecl *MethodImp); |
2363 | |
2364 | bool UnwrapSimilarTypes(QualType &T1, QualType &T2); |
2365 | bool UnwrapSimilarArrayTypes(QualType &T1, QualType &T2); |
2366 | |
2367 | /// Determine if two types are similar, according to the C++ rules. That is, |
2368 | /// determine if they are the same other than qualifiers on the initial |
2369 | /// sequence of pointer / pointer-to-member / array (and in Clang, object |
2370 | /// pointer) types and their element types. |
2371 | /// |
2372 | /// Clang offers a number of qualifiers in addition to the C++ qualifiers; |
2373 | /// those qualifiers are also ignored in the 'similarity' check. |
2374 | bool hasSimilarType(QualType T1, QualType T2); |
2375 | |
2376 | /// Determine if two types are similar, ignoring only CVR qualifiers. |
2377 | bool hasCvrSimilarType(QualType T1, QualType T2); |
2378 | |
2379 | /// Retrieves the "canonical" nested name specifier for a |
2380 | /// given nested name specifier. |
2381 | /// |
2382 | /// The canonical nested name specifier is a nested name specifier |
2383 | /// that uniquely identifies a type or namespace within the type |
2384 | /// system. For example, given: |
2385 | /// |
2386 | /// \code |
2387 | /// namespace N { |
2388 | /// struct S { |
2389 | /// template<typename T> struct X { typedef T* type; }; |
2390 | /// }; |
2391 | /// } |
2392 | /// |
2393 | /// template<typename T> struct Y { |
2394 | /// typename N::S::X<T>::type member; |
2395 | /// }; |
2396 | /// \endcode |
2397 | /// |
2398 | /// Here, the nested-name-specifier for N::S::X<T>:: will be |
2399 | /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined |
2400 | /// by declarations in the type system and the canonical type for |
2401 | /// the template type parameter 'T' is template-param-0-0. |
2402 | NestedNameSpecifier * |
2403 | getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const; |
2404 | |
2405 | /// Retrieves the default calling convention for the current target. |
2406 | CallingConv getDefaultCallingConvention(bool IsVariadic, |
2407 | bool IsCXXMethod, |
2408 | bool IsBuiltin = false) const; |
2409 | |
2410 | /// Retrieves the "canonical" template name that refers to a |
2411 | /// given template. |
2412 | /// |
2413 | /// The canonical template name is the simplest expression that can |
2414 | /// be used to refer to a given template. For most templates, this |
2415 | /// expression is just the template declaration itself. For example, |
2416 | /// the template std::vector can be referred to via a variety of |
2417 | /// names---std::vector, \::std::vector, vector (if vector is in |
2418 | /// scope), etc.---but all of these names map down to the same |
2419 | /// TemplateDecl, which is used to form the canonical template name. |
2420 | /// |
2421 | /// Dependent template names are more interesting. Here, the |
2422 | /// template name could be something like T::template apply or |
2423 | /// std::allocator<T>::template rebind, where the nested name |
2424 | /// specifier itself is dependent. In this case, the canonical |
2425 | /// template name uses the shortest form of the dependent |
2426 | /// nested-name-specifier, which itself contains all canonical |
2427 | /// types, values, and templates. |
2428 | TemplateName getCanonicalTemplateName(TemplateName Name) const; |
2429 | |
2430 | /// Determine whether the given template names refer to the same |
2431 | /// template. |
2432 | bool hasSameTemplateName(TemplateName X, TemplateName Y); |
2433 | |
2434 | /// Retrieve the "canonical" template argument. |
2435 | /// |
2436 | /// The canonical template argument is the simplest template argument |
2437 | /// (which may be a type, value, expression, or declaration) that |
2438 | /// expresses the value of the argument. |
2439 | TemplateArgument getCanonicalTemplateArgument(const TemplateArgument &Arg) |
2440 | const; |
2441 | |
2442 | /// Type Query functions. If the type is an instance of the specified class, |
2443 | /// return the Type pointer for the underlying maximally pretty type. This |
2444 | /// is a member of ASTContext because this may need to do some amount of |
2445 | /// canonicalization, e.g. to move type qualifiers into the element type. |
2446 | const ArrayType *getAsArrayType(QualType T) const; |
2447 | const ConstantArrayType *getAsConstantArrayType(QualType T) const { |
2448 | return dyn_cast_or_null<ConstantArrayType>(getAsArrayType(T)); |
2449 | } |
2450 | const VariableArrayType *getAsVariableArrayType(QualType T) const { |
2451 | return dyn_cast_or_null<VariableArrayType>(getAsArrayType(T)); |
2452 | } |
2453 | const IncompleteArrayType *getAsIncompleteArrayType(QualType T) const { |
2454 | return dyn_cast_or_null<IncompleteArrayType>(getAsArrayType(T)); |
2455 | } |
2456 | const DependentSizedArrayType *getAsDependentSizedArrayType(QualType T) |
2457 | const { |
2458 | return dyn_cast_or_null<DependentSizedArrayType>(getAsArrayType(T)); |
2459 | } |
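| // Hypothetical usage sketch: dyn_cast_or_null makes these safe on |
| // non-array types, which simply yield nullptr: |
| //   if (const auto *CAT = Ctx.getAsConstantArrayType(Ty)) |
| //     NumElts = CAT->getSize().getZExtValue(); |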
2460 | |
2461 | /// Return the innermost element type of an array type. |
2462 | /// |
2463 | /// For example, this returns "int" for int[m][n]. |
2464 | QualType getBaseElementType(const ArrayType *VAT) const; |
2465 | |
2466 | /// Return the innermost element type of a type (which needn't |
2467 | /// actually be an array type). |
2468 | QualType getBaseElementType(QualType QT) const; |
2469 | |
2470 | /// Return number of constant array elements. |
2471 | uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const; |
2472 | |
2473 | /// Perform adjustment on the parameter type of a function. |
2474 | /// |
2475 | /// This routine adjusts the given parameter type \p T to the actual |
2476 | /// parameter type used by semantic analysis (C99 6.7.5.3p[7,8], |
2477 | /// C++ [dcl.fct]p3). The adjusted parameter type is returned. |
2478 | QualType getAdjustedParameterType(QualType T) const; |
2479 | |
2480 | /// Retrieve the parameter type as adjusted for use in the signature |
2481 | /// of a function, decaying array and function types and removing top-level |
2482 | /// cv-qualifiers. |
2483 | QualType getSignatureParameterType(QualType T) const; |
2484 | |
2485 | QualType getExceptionObjectType(QualType T) const; |
2486 | |
2487 | /// Return the properly qualified result of decaying the specified |
2488 | /// array type to a pointer. |
2489 | /// |
2490 | /// This operation is non-trivial when handling typedefs etc. The canonical |
2491 | /// type of \p T must be an array type, this returns a pointer to a properly |
2492 | /// qualified element of the array. |
2493 | /// |
2494 | /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. |
2495 | QualType getArrayDecayedType(QualType T) const; |
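| // For example, decaying 'const int[3]' yields 'const int *'; the element |
| // qualifiers are preserved on the pointee type. |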
2496 | |
2497 | /// Return the type that \p PromotableType will promote to: C99 |
2498 | /// 6.3.1.1p2, assuming that \p PromotableType is a promotable integer type. |
2499 | QualType getPromotedIntegerType(QualType PromotableType) const; |
2500 | |
2501 | /// Recurses in pointer/array types until it finds an Objective-C |
2502 | /// retainable type and returns its ownership. |
2503 | Qualifiers::ObjCLifetime getInnerObjCOwnership(QualType T) const; |
2504 | |
2505 | /// Whether this is a promotable bitfield reference according |
2506 | /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). |
2507 | /// |
2508 | /// \returns the type this bit-field will promote to, or NULL if no |
2509 | /// promotion occurs. |
2510 | QualType isPromotableBitField(Expr *E) const; |
2511 | |
2512 | /// Return the highest ranked integer type, see C99 6.3.1.8p1. |
2513 | /// |
2514 | /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If |
2515 | /// \p LHS < \p RHS, returns -1. |
2516 | int getIntegerTypeOrder(QualType LHS, QualType RHS) const; |
2517 | |
2518 | /// Compare the rank of the two specified floating point types, |
2519 | /// ignoring the domain of the type (i.e. 'double' == '_Complex double'). |
2520 | /// |
2521 | /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If |
2522 | /// \p LHS < \p RHS, returns -1. |
2523 | int getFloatingTypeOrder(QualType LHS, QualType RHS) const; |
2524 | |
2525 | /// Compare the rank of two floating point types as above, but compare equal |
2526 | /// if both types have the same floating-point semantics on the target (i.e. |
2527 | /// long double and double on AArch64 will return 0). |
2528 | int getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const; |
2529 | |
2530 | /// Return a real floating point or a complex type (based on |
2531 | /// \p typeDomain/\p typeSize). |
2532 | /// |
2533 | /// \param typeSize a real floating point or complex type supplying the size. |
2534 | /// \param typeDomain a real floating point or complex type supplying the domain. |
2535 | QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize, |
2536 | QualType typeDomain) const; |
2537 | |
2538 | unsigned getTargetAddressSpace(QualType T) const { |
2539 | return getTargetAddressSpace(T.getQualifiers()); |
2540 | } |
2541 | |
2542 | unsigned getTargetAddressSpace(Qualifiers Q) const { |
2543 | return getTargetAddressSpace(Q.getAddressSpace()); |
2544 | } |
2545 | |
2546 | unsigned getTargetAddressSpace(LangAS AS) const; |
2547 | |
2548 | LangAS getLangASForBuiltinAddressSpace(unsigned AS) const; |
2549 | |
2550 | /// Get target-dependent integer value for null pointer which is used for |
2551 | /// constant folding. |
2552 | uint64_t getTargetNullPointerValue(QualType QT) const; |
2553 | |
2554 | bool addressSpaceMapManglingFor(LangAS AS) const { |
2555 | return AddrSpaceMapMangling || isTargetAddressSpace(AS); |
2556 | } |
2557 | |
2558 | private: |
2559 | // Helper for integer ordering |
2560 | unsigned getIntegerRank(const Type *T) const; |
2561 | |
2562 | public: |
2563 | //===--------------------------------------------------------------------===// |
2564 | // Type Compatibility Predicates |
2565 | //===--------------------------------------------------------------------===// |
2566 | |
2567 | /// Compatibility predicates used to check assignment expressions. |
2568 | bool typesAreCompatible(QualType T1, QualType T2, |
2569 | bool CompareUnqualified = false); // C99 6.2.7p1 |
2570 | |
2571 | bool propertyTypesAreCompatible(QualType, QualType); |
2572 | bool typesAreBlockPointerCompatible(QualType, QualType); |
2573 | |
2574 | bool isObjCIdType(QualType T) const { |
2575 | return T == getObjCIdType(); |
2576 | } |
2577 | |
2578 | bool isObjCClassType(QualType T) const { |
2579 | return T == getObjCClassType(); |
2580 | } |
2581 | |
2582 | bool isObjCSelType(QualType T) const { |
2583 | return T == getObjCSelType(); |
2584 | } |
2585 | |
2586 | bool ObjCQualifiedIdTypesAreCompatible(const ObjCObjectPointerType *LHS, |
2587 | const ObjCObjectPointerType *RHS, |
2588 | bool ForCompare); |
2589 | |
2590 | bool ObjCQualifiedClassTypesAreCompatible(const ObjCObjectPointerType *LHS, |
2591 | const ObjCObjectPointerType *RHS); |
2592 | |
2593 | // Check the safety of assignment from LHS to RHS |
2594 | bool canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, |
2595 | const ObjCObjectPointerType *RHSOPT); |
2596 | bool canAssignObjCInterfaces(const ObjCObjectType *LHS, |
2597 | const ObjCObjectType *RHS); |
2598 | bool canAssignObjCInterfacesInBlockPointer( |
2599 | const ObjCObjectPointerType *LHSOPT, |
2600 | const ObjCObjectPointerType *RHSOPT, |
2601 | bool BlockReturnType); |
2602 | bool areComparableObjCPointerTypes(QualType LHS, QualType RHS); |
2603 | QualType areCommonBaseCompatible(const ObjCObjectPointerType *LHSOPT, |
2604 | const ObjCObjectPointerType *RHSOPT); |
2605 | bool canBindObjCObjectType(QualType To, QualType From); |
2606 | |
2607 | // Functions for calculating composite types |
2608 | QualType mergeTypes(QualType, QualType, bool OfBlockPointer=false, |
2609 | bool Unqualified = false, bool BlockReturnType = false); |
2610 | QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false, |
2611 | bool Unqualified = false); |
2612 | QualType mergeFunctionParameterTypes(QualType, QualType, |
2613 | bool OfBlockPointer = false, |
2614 | bool Unqualified = false); |
2615 | QualType mergeTransparentUnionType(QualType, QualType, |
2616 | bool OfBlockPointer=false, |
2617 | bool Unqualified = false); |
2618 | |
2619 | QualType mergeObjCGCQualifiers(QualType, QualType); |
2620 | |
2621 | /// This function merges the ExtParameterInfo lists of two functions. It |
2622 | /// returns true if the lists are compatible. The merged list is returned in |
2623 | /// NewParamInfos. |
2624 | /// |
2625 | /// \param FirstFnType The type of the first function. |
2626 | /// |
2627 | /// \param SecondFnType The type of the second function. |
2628 | /// |
2629 | /// \param CanUseFirst This flag is set to true if the first function's |
2630 | /// ExtParameterInfo list can be used as the composite list of |
2631 | /// ExtParameterInfo. |
2632 | /// |
2633 | /// \param CanUseSecond This flag is set to true if the second function's |
2634 | /// ExtParameterInfo list can be used as the composite list of |
2635 | /// ExtParameterInfo. |
2636 | /// |
2637 | /// \param NewParamInfos The composite list of ExtParameterInfo. The list is |
2638 | /// empty if none of the flags are set. |
2639 | /// |
2640 | bool mergeExtParameterInfo( |
2641 | const FunctionProtoType *FirstFnType, |
2642 | const FunctionProtoType *SecondFnType, |
2643 | bool &CanUseFirst, bool &CanUseSecond, |
2644 | SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos); |
2645 | |
2646 | void ResetObjCLayout(const ObjCContainerDecl *CD); |
2647 | |
2648 | //===--------------------------------------------------------------------===// |
2649 | // Integer Predicates |
2650 | //===--------------------------------------------------------------------===// |
2651 | |
2652 | // The width of an integer, as defined in C99 6.2.6.2. This is the number |
2653 | // of bits in an integer type excluding any padding bits. |
2654 | unsigned getIntWidth(QualType T) const; |
2655 | |
2656 | // Per C99 6.2.5p6, for every signed integer type, there is a corresponding |
2657 | // unsigned integer type. This method takes a signed type, and returns the |
2658 | // corresponding unsigned integer type. |
2659 | // With the introduction of fixed point types in ISO N1169, this method also |
2660 | // accepts fixed point types and returns the corresponding unsigned type for |
2661 | // a given fixed point type. |
2662 | QualType getCorrespondingUnsignedType(QualType T) const; |
2663 | |
2664 | // Per ISO N1169, this method accepts fixed point types and returns the |
2665 | // corresponding saturated type for a given fixed point type. |
2666 | QualType getCorrespondingSaturatedType(QualType Ty) const; |
2667 | |
2668 | // This method accepts fixed point types and returns the corresponding signed |
2669 | // type. Unlike getCorrespondingUnsignedType(), this only accepts unsigned |
2670 | // fixed point types because there are unsigned integer types like bool and |
2671 | // char8_t that don't have signed equivalents. |
2672 | QualType getCorrespondingSignedFixedPointType(QualType Ty) const; |
2673 | |
2674 | //===--------------------------------------------------------------------===// |
2675 | // Integer Values |
2676 | //===--------------------------------------------------------------------===// |
2677 | |
2678 | /// Make an APSInt of the appropriate width and signedness for the |
2679 | /// given \p Value and integer \p Type. |
2680 | llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) const { |
2681 | // If Type is a signed integer type larger than 64 bits, we need to be sure |
2682 | // to sign extend Res appropriately. |
2683 | llvm::APSInt Res(64, !Type->isSignedIntegerOrEnumerationType()); |
2684 | Res = Value; |
2685 | unsigned Width = getIntWidth(Type); |
2686 | if (Width != Res.getBitWidth()) |
2687 | return Res.extOrTrunc(Width); |
2688 | return Res; |
2689 | } |
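| // A minimal sketch, assuming an 8-bit 'unsigned char': |
| //   llvm::APSInt V = Ctx.MakeIntValue(300, Ctx.UnsignedCharTy); |
| //   // V is unsigned and truncated to 8 bits, so V == 44. |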
2690 | |
2691 | bool isSentinelNullExpr(const Expr *E); |
2692 | |
2693 | /// Get the implementation of the ObjCInterfaceDecl \p D, or nullptr if |
2694 | /// none exists. |
2695 | ObjCImplementationDecl *getObjCImplementation(ObjCInterfaceDecl *D); |
2696 | |
2697 | /// Get the implementation of the ObjCCategoryDecl \p D, or nullptr if |
2698 | /// none exists. |
2699 | ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D); |
2700 | |
2701 | /// Return true if there is at least one \@implementation in the TU. |
2702 | bool AnyObjCImplementation() { |
2703 | return !ObjCImpls.empty(); |
2704 | } |
2705 | |
2706 | /// Set the implementation of ObjCInterfaceDecl. |
2707 | void setObjCImplementation(ObjCInterfaceDecl *IFaceD, |
2708 | ObjCImplementationDecl *ImplD); |
2709 | |
2710 | /// Set the implementation of ObjCCategoryDecl. |
2711 | void setObjCImplementation(ObjCCategoryDecl *CatD, |
2712 | ObjCCategoryImplDecl *ImplD); |
2713 | |
2714 | /// Get the duplicate declaration of an ObjCMethod in the same |
2715 | /// interface, or null if none exists. |
2716 | const ObjCMethodDecl * |
2717 | getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const; |
2718 | |
2719 | void setObjCMethodRedeclaration(const ObjCMethodDecl *MD, |
2720 | const ObjCMethodDecl *Redecl); |
2721 | |
2722 | /// Returns the Objective-C interface that \p ND belongs to if it is |
2723 | /// an Objective-C method/property/ivar etc. that is part of an interface, |
2724 | /// otherwise returns null. |
2725 | const ObjCInterfaceDecl *getObjContainingInterface(const NamedDecl *ND) const; |
2726 | |
2727 | /// Set the copy initialization expression of a block var decl. \p CanThrow |
2728 | /// indicates whether the copy expression can throw or not. |
2729 | void setBlockVarCopyInit(const VarDecl* VD, Expr *CopyExpr, bool CanThrow); |
2730 | |
2731 | /// Get the copy initialization expression of the VarDecl \p VD, or |
2732 | /// nullptr if none exists. |
2733 | BlockVarCopyInit getBlockVarCopyInit(const VarDecl* VD) const; |
2734 | |
2735 | /// Allocate an uninitialized TypeSourceInfo. |
2736 | /// |
2737 | /// The caller should initialize the memory held by TypeSourceInfo using |
2738 | /// the TypeLoc wrappers. |
2739 | /// |
2740 | /// \param T the type that will be the basis for type source info. This type |
2741 | /// should refer to how the declarator was written in source code, not to |
2742 | /// what type semantic analysis resolved the declarator to. |
2743 | /// |
2744 | /// \param Size the size of the type info to create, or 0 if the size |
2745 | /// should be calculated based on the type. |
2746 | TypeSourceInfo *CreateTypeSourceInfo(QualType T, unsigned Size = 0) const; |
2747 | |
2748 | /// Allocate a TypeSourceInfo where all locations have been |
2749 | /// initialized to a given location, which defaults to the empty |
2750 | /// location. |
2751 | TypeSourceInfo * |
2752 | getTrivialTypeSourceInfo(QualType T, |
2753 | SourceLocation Loc = SourceLocation()) const; |
2754 | |
2755 | /// Add a deallocation callback that will be invoked when the |
2756 | /// ASTContext is destroyed. |
2757 | /// |
2758 | /// \param Callback A callback function that will be invoked on destruction. |
2759 | /// |
2760 | /// \param Data Pointer data that will be provided to the callback function |
2761 | /// when it is called. |
2762 | void AddDeallocation(void (*Callback)(void *), void *Data) const; |
2763 | |
2764 | /// If T isn't trivially destructible, calls AddDeallocation to register it |
2765 | /// for destruction. |
2766 | template <typename T> void addDestruction(T *Ptr) const { |
2767 | if (!std::is_trivially_destructible<T>::value) { |
2768 | auto DestroyPtr = [](void *V) { static_cast<T *>(V)->~T(); }; |
2769 | AddDeallocation(DestroyPtr, Ptr); |
2770 | } |
2771 | } |
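| // Hypothetical usage sketch: memory from the ASTContext's bump allocator |
| // is never freed individually, so non-trivially-destructible objects must |
| // be registered here to have their destructors run at context teardown: |
| //   auto *N = new (Ctx) NodeWithOwningString(...); // hypothetical type |
| //   Ctx.addDestruction(N); // no-op for trivially destructible types |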
2772 | |
2773 | GVALinkage GetGVALinkageForFunction(const FunctionDecl *FD) const; |
2774 | GVALinkage GetGVALinkageForVariable(const VarDecl *VD); |
2775 | |
2776 | /// Determines if the decl can be CodeGen'ed or deserialized from PCH |
2777 | /// lazily, only when used; this is only relevant for function or file scoped |
2778 | /// var definitions. |
2779 | /// |
2780 | /// \returns true if the function/var must be CodeGen'ed/deserialized even if |
2781 | /// it is not used. |
2782 | bool DeclMustBeEmitted(const Decl *D); |
2783 | |
2784 | /// Visits all versions of a multiversioned function with the passed |
2785 | /// predicate. |
2786 | void forEachMultiversionedFunctionVersion( |
2787 | const FunctionDecl *FD, |
2788 | llvm::function_ref<void(FunctionDecl *)> Pred) const; |
2789 | |
2790 | const CXXConstructorDecl * |
2791 | getCopyConstructorForExceptionObject(CXXRecordDecl *RD); |
2792 | |
2793 | void addCopyConstructorForExceptionObject(CXXRecordDecl *RD, |
2794 | CXXConstructorDecl *CD); |
2795 | |
2796 | void addTypedefNameForUnnamedTagDecl(TagDecl *TD, TypedefNameDecl *TND); |
2797 | |
2798 | TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD); |
2799 | |
2800 | void addDeclaratorForUnnamedTagDecl(TagDecl *TD, DeclaratorDecl *DD); |
2801 | |
2802 | DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD); |
2803 | |
2804 | void setManglingNumber(const NamedDecl *ND, unsigned Number); |
2805 | unsigned getManglingNumber(const NamedDecl *ND) const; |
2806 | |
2807 | void setStaticLocalNumber(const VarDecl *VD, unsigned Number); |
2808 | unsigned getStaticLocalNumber(const VarDecl *VD) const; |
2809 | |
2810 | /// Retrieve the context for computing mangling numbers in the given |
2811 | /// DeclContext. |
2812 | MangleNumberingContext &getManglingNumberContext(const DeclContext *DC); |
2813 | |
2814 | std::unique_ptr<MangleNumberingContext> createMangleNumberingContext() const; |
2815 | |
2816 | /// Used by ParmVarDecl to store on the side the |
2817 | /// index of the parameter when it exceeds the size of the normal bitfield. |
2818 | void setParameterIndex(const ParmVarDecl *D, unsigned index); |
2819 | |
2820 | /// Used by ParmVarDecl to retrieve on the side the |
2821 | /// index of the parameter when it exceeds the size of the normal bitfield. |
2822 | unsigned getParameterIndex(const ParmVarDecl *D) const; |
2823 | |
2824 | /// Get the storage for the constant value of a materialized temporary |
2825 | /// of static storage duration. |
2826 | APValue *getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E, |
2827 | bool MayCreate); |
2828 | |
2829 | /// Return a string representing the human-readable name for the specified |
2830 | /// function declaration or file name. Used by SourceLocExpr and |
2831 | /// PredefinedExpr to cache evaluated results. |
2832 | StringLiteral *getPredefinedStringLiteralFromCache(StringRef Key) const; |
2833 | |
2834 | //===--------------------------------------------------------------------===// |
2835 | // Statistics |
2836 | //===--------------------------------------------------------------------===// |
2837 | |
2838 | /// The number of implicitly-declared default constructors. |
2839 | unsigned NumImplicitDefaultConstructors = 0; |
2840 | |
2841 | /// The number of implicitly-declared default constructors for |
2842 | /// which declarations were built. |
2843 | unsigned NumImplicitDefaultConstructorsDeclared = 0; |
2844 | |
2845 | /// The number of implicitly-declared copy constructors. |
2846 | unsigned NumImplicitCopyConstructors = 0; |
2847 | |
2848 | /// The number of implicitly-declared copy constructors for |
2849 | /// which declarations were built. |
2850 | unsigned NumImplicitCopyConstructorsDeclared = 0; |
2851 | |
2852 | /// The number of implicitly-declared move constructors. |
2853 | unsigned NumImplicitMoveConstructors = 0; |
2854 | |
2855 | /// The number of implicitly-declared move constructors for |
2856 | /// which declarations were built. |
2857 | unsigned NumImplicitMoveConstructorsDeclared = 0; |
2858 | |
2859 | /// The number of implicitly-declared copy assignment operators. |
2860 | unsigned NumImplicitCopyAssignmentOperators = 0; |
2861 | |
2862 | /// The number of implicitly-declared copy assignment operators for |
2863 | /// which declarations were built. |
2864 | unsigned NumImplicitCopyAssignmentOperatorsDeclared = 0; |
2865 | |
2866 | /// The number of implicitly-declared move assignment operators. |
2867 | unsigned NumImplicitMoveAssignmentOperators = 0; |
2868 | |
2869 | /// The number of implicitly-declared move assignment operators for |
2870 | /// which declarations were built. |
2871 | unsigned NumImplicitMoveAssignmentOperatorsDeclared = 0; |
2872 | |
2873 | /// The number of implicitly-declared destructors. |
2874 | unsigned NumImplicitDestructors = 0; |
2875 | |
2876 | /// The number of implicitly-declared destructors for which |
2877 | /// declarations were built. |
2878 | unsigned NumImplicitDestructorsDeclared = 0; |
2879 | |
2880 | public: |
2881 | /// Initialize built-in types. |
2882 | /// |
2883 | /// This routine may only be invoked once for a given ASTContext object. |
2884 | /// It is normally invoked after ASTContext construction. |
2885 | /// |
2886 | /// \param Target The target whose built-in types are to be initialized. |
2887 | void InitBuiltinTypes(const TargetInfo &Target, |
2888 | const TargetInfo *AuxTarget = nullptr); |
2889 | |
2890 | private: |
2891 | void InitBuiltinType(CanQualType &R, BuiltinType::Kind K); |
2892 | |
2893 | class ObjCEncOptions { |
2894 | unsigned Bits; |
2895 | |
2896 | ObjCEncOptions(unsigned Bits) : Bits(Bits) {} |
2897 | |
2898 | public: |
2899 | ObjCEncOptions() : Bits(0) {} |
2900 | ObjCEncOptions(const ObjCEncOptions &RHS) : Bits(RHS.Bits) {} |
2901 | |
2902 | #define OPT_LIST(V) \ |
2903 | V(ExpandPointedToStructures, 0) \ |
2904 | V(ExpandStructures, 1) \ |
2905 | V(IsOutermostType, 2) \ |
2906 | V(EncodingProperty, 3) \ |
2907 | V(IsStructField, 4) \ |
2908 | V(EncodeBlockParameters, 5) \ |
2909 | V(EncodeClassNames, 6) \ |
2910 | |
2911 | #define V(N,I) ObjCEncOptions& set##N() { Bits |= 1 << I; return *this; } |
2912 | OPT_LIST(V) |
2913 | #undef V |
2914 | |
2915 | #define V(N,I) bool N() const { return Bits & 1 << I; } |
2916 | OPT_LIST(V) |
2917 | #undef V |
2918 | |
2919 | #undef OPT_LIST |
2920 | |
2921 | LLVM_NODISCARD ObjCEncOptions keepingOnly(ObjCEncOptions Mask) const { |
2922 | return Bits & Mask.Bits; |
2923 | } |
2924 | |
2925 | LLVM_NODISCARD ObjCEncOptions forComponentType() const { |
2926 | ObjCEncOptions Mask = ObjCEncOptions() |
2927 | .setIsOutermostType() |
2928 | .setIsStructField(); |
2929 | return Bits & ~Mask.Bits; |
2930 | } |
2931 | }; |
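| // Illustrative sketch of the bitmask accessors generated above: |
| //   ObjCEncOptions Opts = |
| //       ObjCEncOptions().setExpandStructures().setIsOutermostType(); |
| //   assert(Opts.ExpandStructures() && Opts.IsOutermostType()); |
| //   // forComponentType() clears the position-dependent bits: |
| //   assert(!Opts.forComponentType().IsOutermostType()); |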
2932 | |
2933 | // Return the Objective-C type encoding for a given type. |
2934 | void getObjCEncodingForTypeImpl(QualType t, std::string &S, |
2935 | ObjCEncOptions Options, |
2936 | const FieldDecl *Field, |
2937 | QualType *NotEncodedT = nullptr) const; |
2938 | |
2939 | // Adds the encoding of the structure's members. |
2940 | void getObjCEncodingForStructureImpl(RecordDecl *RD, std::string &S, |
2941 | const FieldDecl *Field, |
2942 | bool includeVBases = true, |
2943 | QualType *NotEncodedT=nullptr) const; |
2944 | |
2945 | public: |
2946 | // Adds the encoding of a method parameter or return type. |
2947 | void getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, |
2948 | QualType T, std::string& S, |
2949 | bool Extended) const; |
2950 | |
2951 | /// Returns true if this is an inline-initialized static data member |
2952 | /// which is treated as a definition for MSVC compatibility. |
2953 | bool isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const; |
2954 | |
2955 | enum class InlineVariableDefinitionKind { |
2956 | /// Not an inline variable. |
2957 | None, |
2958 | |
2959 | /// Weak definition of inline variable. |
2960 | Weak, |
2961 | |
2962 | /// Weak for now, might become strong later in this TU. |
2963 | WeakUnknown, |
2964 | |
2965 | /// Strong definition. |
2966 | Strong |
2967 | }; |
2968 | |
2969 | /// Determine whether a definition of this inline variable should |
2970 | /// be treated as a weak or strong definition. For compatibility with |
2971 | /// C++14 and before, for a constexpr static data member, if there is an |
2972 | /// out-of-line declaration of the member, we may promote it from weak to |
2973 | /// strong. |
2974 | InlineVariableDefinitionKind |
2975 | getInlineVariableDefinitionKind(const VarDecl *VD) const; |
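// A hedged sketch of how a caller might act on the result; 'Ctx' and 'VD'
// are assumed to be in scope:
//
//   switch (Ctx.getInlineVariableDefinitionKind(VD)) {
//   case ASTContext::InlineVariableDefinitionKind::None:
//     break; // not an inline variable
//   case ASTContext::InlineVariableDefinitionKind::Weak:
//   case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
//     // emit with discardable (weak) linkage; WeakUnknown may still be
//     // promoted to Strong later in this TU
//     break;
//   case ASTContext::InlineVariableDefinitionKind::Strong:
//     // emit a strong definition
//     break;
//   }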
2976 | |
2977 | private: |
2978 | friend class DeclarationNameTable; |
2979 | friend class DeclContext; |
2980 | |
2981 | const ASTRecordLayout & |
2982 | getObjCLayout(const ObjCInterfaceDecl *D, |
2983 | const ObjCImplementationDecl *Impl) const; |
2984 | |
2985 | /// A set of deallocations that should be performed when the |
2986 | /// ASTContext is destroyed. |
2987 | // FIXME: We really should have a better mechanism in the ASTContext to |
2988 | // manage running destructors for types which do variable sized allocation |
2989 | // within the AST. In some places we thread the AST bump pointer allocator |
2990 | // into the data structures, which avoids this mess during deallocation but
2991 | // is wasteful of memory; here we require a lot of error-prone bookkeeping
2992 | // in order to track and run destructors while we're tearing things down.
2993 | using DeallocationFunctionsAndArguments = |
2994 | llvm::SmallVector<std::pair<void (*)(void *), void *>, 16>; |
2995 | mutable DeallocationFunctionsAndArguments Deallocations; |
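// A minimal sketch of the registration/teardown protocol this vector
// implies. 'AddDeallocation' mirrors ASTContext's public helper; the
// destructor thunk and the teardown loop are illustrative, not the actual
// implementation:
//
//   template <typename T> static void destroyThunk(void *P) {
//     static_cast<T *>(P)->~T(); // run the destructor; storage stays put
//   }
//   // Registration: Ctx.AddDeallocation(destroyThunk<MyNode>, Node);
//   // Teardown (conceptually, in ~ASTContext):
//   //   for (auto &Pair : Deallocations) Pair.first(Pair.second);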
2996 | |
2997 | // FIXME: This currently contains the set of StoredDeclMaps used |
2998 | // by DeclContext objects. This probably should not be in ASTContext, |
2999 | // but we include it here so that ASTContext can quickly deallocate them. |
3000 | llvm::PointerIntPair<StoredDeclsMap *, 1> LastSDM; |
3001 | |
3002 | std::vector<Decl *> TraversalScope; |
3003 | class ParentMap; |
3004 | std::unique_ptr<ParentMap> Parents; |
3005 | |
3006 | std::unique_ptr<VTableContextBase> VTContext; |
3007 | |
3008 | void ReleaseDeclContextMaps(); |
3009 | |
3010 | public: |
3011 | enum PragmaSectionFlag : unsigned { |
3012 | PSF_None = 0, |
3013 | PSF_Read = 0x1, |
3014 | PSF_Write = 0x2, |
3015 | PSF_Execute = 0x4, |
3016 | PSF_Implicit = 0x8, |
3017 | PSF_Invalid = 0x80000000U, |
3018 | }; |
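// These flags combine bitwise. A hedged example of representing a
// '#pragma section' with read/write access (values illustrative only):
//
//   unsigned Flags = ASTContext::PSF_Read | ASTContext::PSF_Write;
//   bool IsExecutable = (Flags & ASTContext::PSF_Execute) != 0; // false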
3019 | |
3020 | struct SectionInfo { |
3021 | DeclaratorDecl *Decl; |
3022 | SourceLocation PragmaSectionLocation; |
3023 | int SectionFlags; |
3024 | |
3025 | SectionInfo() = default; |
3026 | SectionInfo(DeclaratorDecl *Decl, |
3027 | SourceLocation PragmaSectionLocation, |
3028 | int SectionFlags) |
3029 | : Decl(Decl), PragmaSectionLocation(PragmaSectionLocation), |
3030 | SectionFlags(SectionFlags) {} |
3031 | }; |
3032 | |
3033 | llvm::StringMap<SectionInfo> SectionInfos; |
3034 | }; |
3035 | |
3036 | /// Utility function for constructing a nullary selector. |
3037 | inline Selector GetNullarySelector(StringRef name, ASTContext &Ctx) { |
3038 | IdentifierInfo* II = &Ctx.Idents.get(name); |
3039 | return Ctx.Selectors.getSelector(0, &II); |
3040 | } |
3041 | |
3042 | /// Utility function for constructing a unary selector.
3043 | inline Selector GetUnarySelector(StringRef name, ASTContext &Ctx) { |
3044 | IdentifierInfo* II = &Ctx.Idents.get(name); |
3045 | return Ctx.Selectors.getSelector(1, &II); |
3046 | } |
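// A brief usage sketch (hedged; 'Ctx' is an ASTContext in scope). A nullary
// selector has no colon; a unary selector takes exactly one argument:
//
//   Selector Init = GetNullarySelector("init", Ctx); // matches "init"
//   Selector SetX = GetUnarySelector("setX", Ctx);   // matches "setX:"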
3047 | |
3048 | } // namespace clang |
3049 | |
3050 | // operator new and delete aren't allowed inside namespaces. |
3051 | |
3052 | /// Placement new for using the ASTContext's allocator. |
3053 | /// |
3054 | /// This placement form of operator new uses the ASTContext's allocator for |
3055 | /// obtaining memory. |
3056 | /// |
3057 | /// IMPORTANT: These are also declared in clang/AST/ASTContextAllocate.h! |
3058 | /// Any changes here need to also be made there. |
3059 | /// |
3060 | /// We intentionally avoid using a nothrow specification here so that the calls |
3061 | /// to this operator will not perform a null check on the result -- the |
3062 | /// underlying allocator never returns null pointers. |
3063 | /// |
3064 | /// Usage looks like this (assuming there's an ASTContext 'Context' in scope): |
3065 | /// @code |
3066 | /// // Default alignment (8) |
3067 | /// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments); |
3068 | /// // Specific alignment |
3069 | /// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments); |
3070 | /// @endcode |
3071 | /// Memory allocated through this placement new operator does not need to be |
3072 | /// explicitly freed, as ASTContext will free all of this memory when it gets |
3073 | /// destroyed. Please note that you cannot use delete on the pointer. |
3074 | /// |
3075 | /// @param Bytes The number of bytes to allocate. Calculated by the compiler. |
3076 | /// @param C The ASTContext that provides the allocator. |
3077 | /// @param Alignment The alignment of the allocated memory (if the underlying |
3078 | /// allocator supports it). |
3079 | /// @return The allocated memory; the underlying allocator never returns null.
3080 | inline void *operator new(size_t Bytes, const clang::ASTContext &C, |
3081 | size_t Alignment /* = 8 */) { |
3082 | return C.Allocate(Bytes, Alignment); |
3083 | } |
3084 | |
3085 | /// Placement delete companion to the new above. |
3086 | /// |
3087 | /// This operator is just a companion to the new above. There is no way of |
3088 | /// invoking it directly; see the new operator for more details. This operator |
3089 | /// is called implicitly by the compiler if a placement new expression using |
3090 | /// the ASTContext throws in the object constructor. |
3091 | inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t) { |
3092 | C.Deallocate(Ptr); |
3093 | } |
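// When this delete actually runs, sketched below; the type 'Widget' and its
// throwing constructor are hypothetical:
//
//   // Widget *W = new (Context) Widget(Args);
//   // If Widget's constructor throws, the compiler calls the operator
//   // delete above to release the storage before propagating the exception.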
3094 | |
3095 | /// This placement form of operator new[] uses the ASTContext's allocator for |
3096 | /// obtaining memory. |
3097 | /// |
3098 | /// We intentionally avoid using a nothrow specification here so that the calls |
3099 | /// to this operator will not perform a null check on the result -- the |
3100 | /// underlying allocator never returns null pointers. |
3101 | /// |
3102 | /// Usage looks like this (assuming there's an ASTContext 'Context' in scope): |
3103 | /// @code |
3104 | /// // Default alignment (8) |
3105 | /// char *data = new (Context) char[10]; |
3106 | /// // Specific alignment |
3107 | /// char *data2 = new (Context, 4) char[10];
3108 | /// @endcode |
3109 | /// Memory allocated through this placement new[] operator does not need to be |
3110 | /// explicitly freed, as ASTContext will free all of this memory when it gets |
3111 | /// destroyed. Please note that you cannot use delete on the pointer. |
3112 | /// |
3113 | /// @param Bytes The number of bytes to allocate. Calculated by the compiler. |
3114 | /// @param C The ASTContext that provides the allocator. |
3115 | /// @param Alignment The alignment of the allocated memory (if the underlying |
3116 | /// allocator supports it). |
3117 | /// @return The allocated memory; the underlying allocator never returns null.
3118 | inline void *operator new[](size_t Bytes, const clang::ASTContext& C, |
3119 | size_t Alignment /* = 8 */) { |
3120 | return C.Allocate(Bytes, Alignment); |
3121 | } |
3122 | |
3123 | /// Placement delete[] companion to the new[] above. |
3124 | /// |
3125 | /// This operator is just a companion to the new[] above. There is no way of |
3126 | /// invoking it directly; see the new[] operator for more details. This operator |
3127 | /// is called implicitly by the compiler if a placement new[] expression using |
3128 | /// the ASTContext throws in the object constructor. |
3129 | inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t) { |
3130 | C.Deallocate(Ptr); |
3131 | } |
3132 | |
3133 | /// Create the representation of a LazyGenerationalUpdatePtr. |
3134 | template <typename Owner, typename T, |
3135 | void (clang::ExternalASTSource::*Update)(Owner)> |
3136 | typename clang::LazyGenerationalUpdatePtr<Owner, T, Update>::ValueType |
3137 | clang::LazyGenerationalUpdatePtr<Owner, T, Update>::makeValue( |
3138 | const clang::ASTContext &Ctx, T Value) { |
3139 | // Note, this is implemented here so that ExternalASTSource.h doesn't need to |
3140 | // include ASTContext.h. We explicitly instantiate it for all relevant types |
3141 | // in ASTContext.cpp. |
3142 | if (auto *Source = Ctx.getExternalSource()) |
3143 | return new (Ctx) LazyData(Source, Value); |
3144 | return Value; |
3145 | } |
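// A hedged sketch of the two behaviors ('LazyDeclPtr' stands in for any
// concrete instantiation of LazyGenerationalUpdatePtr):
//
//   // auto V = LazyDeclPtr::makeValue(Ctx, D);
//   // no external source:  V holds D inline, no allocation
//   // external source set: V points at a generation-tracked LazyData node
//   //                      allocated via 'new (Ctx)' above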
3146 | |
3147 | #endif // LLVM_CLANG_AST_ASTCONTEXT_H |