Bug Summary

File: clang/lib/CodeGen/TargetInfo.cpp
Warning: line 10319, column 24
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TargetInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/clang/include -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-08-06-171148-17323-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/clang/lib/CodeGen/TargetInfo.cpp

/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/clang/lib/CodeGen/TargetInfo.cpp

1//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#include "TargetInfo.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGValue.h"
19#include "CodeGenFunction.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/RecordLayout.h"
22#include "clang/Basic/CodeGenOptions.h"
23#include "clang/Basic/DiagnosticFrontend.h"
24#include "clang/CodeGen/CGFunctionInfo.h"
25#include "clang/CodeGen/SwiftCallingConv.h"
26#include "llvm/ADT/SmallBitVector.h"
27#include "llvm/ADT/StringExtras.h"
28#include "llvm/ADT/StringSwitch.h"
29#include "llvm/ADT/Triple.h"
30#include "llvm/ADT/Twine.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/IntrinsicsNVPTX.h"
33#include "llvm/IR/Type.h"
34#include "llvm/Support/raw_ostream.h"
35#include <algorithm> // std::sort
36
37using namespace clang;
38using namespace CodeGen;
39
40// Helper for coercing an aggregate argument or return value into an integer
41// array of the same size (including padding) and alignment. This alternate
42// coercion happens only for the RenderScript ABI and can be removed after
43// runtimes that rely on it are no longer supported.
44//
45// RenderScript assumes that the size of the argument / return value in the IR
46// is the same as the size of the corresponding qualified type. This helper
47// coerces the aggregate type into an array of the same size (including
48// padding). This coercion is used in lieu of expansion of struct members or
49// other canonical coercions that return a coerced-type of larger size.
50//
51// Ty - The argument / return value type
52// Context - The associated ASTContext
53// LLVMContext - The associated LLVMContext
54static ABIArgInfo coerceToIntArray(QualType Ty,
55 ASTContext &Context,
56 llvm::LLVMContext &LLVMContext) {
57 // Alignment and Size are measured in bits.
58 const uint64_t Size = Context.getTypeSize(Ty);
59 const uint64_t Alignment = Context.getTypeAlign(Ty);
60 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
61 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
62 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
63}
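// [Worked example added for illustration; not part of the original source]
// For a hypothetical aggregate of Size = 160 bits whose natural alignment is
// 32 bits, NumElements = (160 + 32 - 1) / 32 = 5, so the value is coerced to
// the IR type [5 x i32], preserving the overall size including padding.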
64
65static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
66 llvm::Value *Array,
67 llvm::Value *Value,
68 unsigned FirstIndex,
69 unsigned LastIndex) {
70 // Alternatively, we could emit this as a loop in the source.
71 for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
72 llvm::Value *Cell =
73 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
74 Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
75 }
76}
77
78static bool isAggregateTypeForABI(QualType T) {
79 return !CodeGenFunction::hasScalarEvaluationKind(T) ||
80 T->isMemberFunctionPointerType();
81}
82
83ABIArgInfo
84ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
85 llvm::Type *Padding) const {
86 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
87 ByRef, Realign, Padding);
88}
89
90ABIArgInfo
91ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
92 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
93 /*ByRef*/ false, Realign);
94}
95
96Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
97 QualType Ty) const {
98 return Address::invalid();
99}
100
101bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
102 if (Ty->isPromotableIntegerType())
103 return true;
104
105 if (const auto *EIT = Ty->getAs<ExtIntType>())
106 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
107 return true;
108
109 return false;
110}
111
112ABIInfo::~ABIInfo() {}
113
114/// Does the given lowering require more than the given number of
115/// registers when expanded?
116///
117/// This is intended to be the basis of a reasonable basic implementation
118/// of should{Pass,Return}IndirectlyForSwift.
119///
120/// For most targets, a limit of four total registers is reasonable; this
121/// limits the amount of code required in order to move around the value
122/// in case it wasn't produced immediately prior to the call by the caller
123/// (or wasn't produced in exactly the right registers) or isn't used
124/// immediately within the callee. But some targets may need to further
125/// limit the register count due to an inability to support that many
126/// return registers.
127static bool occupiesMoreThan(CodeGenTypes &cgt,
128 ArrayRef<llvm::Type*> scalarTypes,
129 unsigned maxAllRegisters) {
130 unsigned intCount = 0, fpCount = 0;
131 for (llvm::Type *type : scalarTypes) {
132 if (type->isPointerTy()) {
133 intCount++;
134 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
135 auto ptrWidth = cgt.getTarget().getPointerWidth(0);
136 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
137 } else {
 138 assert(type->isVectorTy() || type->isFloatingPointTy());
139 fpCount++;
140 }
141 }
142
143 return (intCount + fpCount > maxAllRegisters);
144}
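// [Illustrative example; an assumption, not from the original source] On a
// target with 64-bit pointers, the scalar sequence { i8*, i128, double }
// yields intCount = 1 + (128 + 63) / 64 = 3 and fpCount = 1, so
// occupiesMoreThan(..., /*maxAllRegisters=*/4) is false, while a limit of 3
// would make it true.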
145
146bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
147 llvm::Type *eltTy,
148 unsigned numElts) const {
149 // The default implementation of this assumes that the target guarantees
150 // 128-bit SIMD support but nothing more.
151 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
152}
153
154static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
155 CGCXXABI &CXXABI) {
156 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
157 if (!RD) {
158 if (!RT->getDecl()->canPassInRegisters())
159 return CGCXXABI::RAA_Indirect;
160 return CGCXXABI::RAA_Default;
161 }
162 return CXXABI.getRecordArgABI(RD);
163}
164
165static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
166 CGCXXABI &CXXABI) {
167 const RecordType *RT = T->getAs<RecordType>();
168 if (!RT)
169 return CGCXXABI::RAA_Default;
170 return getRecordArgABI(RT, CXXABI);
171}
172
173static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
174 const ABIInfo &Info) {
175 QualType Ty = FI.getReturnType();
176
177 if (const auto *RT = Ty->getAs<RecordType>())
178 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
179 !RT->getDecl()->canPassInRegisters()) {
180 FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
181 return true;
182 }
183
184 return CXXABI.classifyReturnType(FI);
185}
186
187/// Pass transparent unions as if they were the type of the first element. Sema
188/// should ensure that all elements of the union have the same "machine type".
189static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
190 if (const RecordType *UT = Ty->getAsUnionType()) {
191 const RecordDecl *UD = UT->getDecl();
192 if (UD->hasAttr<TransparentUnionAttr>()) {
 193 assert(!UD->field_empty() && "sema created an empty transparent union");
194 return UD->field_begin()->getType();
195 }
196 }
197 return Ty;
198}
199
200CGCXXABI &ABIInfo::getCXXABI() const {
201 return CGT.getCXXABI();
202}
203
204ASTContext &ABIInfo::getContext() const {
205 return CGT.getContext();
206}
207
208llvm::LLVMContext &ABIInfo::getVMContext() const {
209 return CGT.getLLVMContext();
210}
211
212const llvm::DataLayout &ABIInfo::getDataLayout() const {
213 return CGT.getDataLayout();
214}
215
216const TargetInfo &ABIInfo::getTarget() const {
217 return CGT.getTarget();
218}
219
220const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
221 return CGT.getCodeGenOpts();
222}
223
224bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
225
226bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
227 return false;
228}
229
230bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
231 uint64_t Members) const {
232 return false;
233}
234
235LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
236 raw_ostream &OS = llvm::errs();
237 OS << "(ABIArgInfo Kind=";
238 switch (TheKind) {
239 case Direct:
240 OS << "Direct Type=";
241 if (llvm::Type *Ty = getCoerceToType())
242 Ty->print(OS);
243 else
244 OS << "null";
245 break;
246 case Extend:
247 OS << "Extend";
248 break;
249 case Ignore:
250 OS << "Ignore";
251 break;
252 case InAlloca:
253 OS << "InAlloca Offset=" << getInAllocaFieldIndex();
254 break;
255 case Indirect:
256 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
257 << " ByVal=" << getIndirectByVal()
258 << " Realign=" << getIndirectRealign();
259 break;
260 case Expand:
261 OS << "Expand";
262 break;
263 case CoerceAndExpand:
264 OS << "CoerceAndExpand Type=";
265 getCoerceAndExpandType()->print(OS);
266 break;
267 }
268 OS << ")\n";
269}
270
271// Dynamically round a pointer up to a multiple of the given alignment.
272static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
273 llvm::Value *Ptr,
274 CharUnits Align) {
275 llvm::Value *PtrAsInt = Ptr;
276 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
277 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
278 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
279 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
280 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
281 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
282 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
283 Ptr->getType(),
284 Ptr->getName() + ".aligned");
285 return PtrAsInt;
286}
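// [Worked example; hypothetical values for illustration] With Ptr == 0x1003
// and Align == 8, the computation is (0x1003 + 7) & -8 == 0x100A & ~0x7 ==
// 0x1008, the next 8-byte-aligned address at or above Ptr.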
287
288/// Emit va_arg for a platform using the common void* representation,
289/// where arguments are simply emitted in an array of slots on the stack.
290///
291/// This version implements the core direct-value passing rules.
292///
293/// \param SlotSize - The size and alignment of a stack slot.
294/// Each argument will be allocated to a multiple of this number of
295/// slots, and all the slots will be aligned to this value.
296/// \param AllowHigherAlign - The slot alignment is not a cap;
297/// an argument type with an alignment greater than the slot size
298/// will be emitted on a higher-alignment address, potentially
299/// leaving one or more empty slots behind as padding. If this
300/// is false, the returned address might be less-aligned than
301/// DirectAlign.
302static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
303 Address VAListAddr,
304 llvm::Type *DirectTy,
305 CharUnits DirectSize,
306 CharUnits DirectAlign,
307 CharUnits SlotSize,
308 bool AllowHigherAlign) {
309 // Cast the element type to i8* if necessary. Some platforms define
310 // va_list as a struct containing an i8* instead of just an i8*.
311 if (VAListAddr.getElementType() != CGF.Int8PtrTy)
312 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
313
314 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
315
316 // If the CC aligns values higher than the slot size, do so if needed.
317 Address Addr = Address::invalid();
318 if (AllowHigherAlign && DirectAlign > SlotSize) {
319 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
320 DirectAlign);
321 } else {
322 Addr = Address(Ptr, SlotSize);
323 }
324
325 // Advance the pointer past the argument, then store that back.
326 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
327 Address NextPtr =
328 CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
329 CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
330
331 // If the argument is smaller than a slot, and this is a big-endian
332 // target, the argument will be right-adjusted in its slot.
333 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
334 !DirectTy->isStructTy()) {
335 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
336 }
337
338 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
339 return Addr;
340}
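// [Illustrative note; not part of the original source] For a 1-byte argument
// in a 4-byte slot on a big-endian target, the returned address is bumped by
// SlotSize - DirectSize = 3 bytes so it points at the value itself, while
// argp.next still advances by the full 4-byte slot.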
341
342/// Emit va_arg for a platform using the common void* representation,
343/// where arguments are simply emitted in an array of slots on the stack.
344///
345/// \param IsIndirect - Values of this type are passed indirectly.
346/// \param ValueInfo - The size and alignment of this type, generally
347/// computed with getContext().getTypeInfoInChars(ValueTy).
348/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
349/// Each argument will be allocated to a multiple of this number of
350/// slots, and all the slots will be aligned to this value.
351/// \param AllowHigherAlign - The slot alignment is not a cap;
352/// an argument type with an alignment greater than the slot size
353/// will be emitted on a higher-alignment address, potentially
354/// leaving one or more empty slots behind as padding.
355static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
356 QualType ValueTy, bool IsIndirect,
357 std::pair<CharUnits, CharUnits> ValueInfo,
358 CharUnits SlotSizeAndAlign,
359 bool AllowHigherAlign) {
360 // The size and alignment of the value that was passed directly.
361 CharUnits DirectSize, DirectAlign;
362 if (IsIndirect) {
363 DirectSize = CGF.getPointerSize();
364 DirectAlign = CGF.getPointerAlign();
365 } else {
366 DirectSize = ValueInfo.first;
367 DirectAlign = ValueInfo.second;
368 }
369
370 // Cast the address we've calculated to the right type.
371 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
372 if (IsIndirect)
373 DirectTy = DirectTy->getPointerTo(0);
374
375 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
376 DirectSize, DirectAlign,
377 SlotSizeAndAlign,
378 AllowHigherAlign);
379
380 if (IsIndirect) {
381 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
382 }
383
384 return Addr;
385
386}
387
388static Address emitMergePHI(CodeGenFunction &CGF,
389 Address Addr1, llvm::BasicBlock *Block1,
390 Address Addr2, llvm::BasicBlock *Block2,
391 const llvm::Twine &Name = "") {
 392 assert(Addr1.getType() == Addr2.getType());
393 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
394 PHI->addIncoming(Addr1.getPointer(), Block1);
395 PHI->addIncoming(Addr2.getPointer(), Block2);
396 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
397 return Address(PHI, Align);
398}
399
400TargetCodeGenInfo::~TargetCodeGenInfo() = default;
401
402// If someone can figure out a general rule for this, that would be great.
403// It's probably just doomed to be platform-dependent, though.
404unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
405 // Verified for:
406 // x86-64 FreeBSD, Linux, Darwin
407 // x86-32 FreeBSD, Linux, Darwin
408 // PowerPC Linux, Darwin
409 // ARM Darwin (*not* EABI)
410 // AArch64 Linux
411 return 32;
412}
413
414bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
415 const FunctionNoProtoType *fnType) const {
416 // The following conventions are known to require this to be false:
417 // x86_stdcall
418 // MIPS
419 // For everything else, we just prefer false unless we opt out.
420 return false;
421}
422
423void
424TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
425 llvm::SmallString<24> &Opt) const {
426 // This assumes the user is passing a library name like "rt" instead of a
427 // filename like "librt.a/so", and that they don't care whether it's static or
428 // dynamic.
429 Opt = "-l";
430 Opt += Lib;
431}
432
433unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
434 // OpenCL kernels are called via an explicit runtime API with arguments
435 // set with clSetKernelArg(), not as normal sub-functions.
436 // Return SPIR_KERNEL by default as the kernel calling convention to
437 // ensure the fingerprint is fixed such way that each OpenCL argument
438 // gets one matching argument in the produced kernel function argument
439 // list to enable feasible implementation of clSetKernelArg() with
440 // aggregates etc. In case we would use the default C calling conv here,
441 // clSetKernelArg() might break depending on the target-specific
442 // conventions; different targets might split structs passed as values
443 // to multiple function arguments etc.
444 return llvm::CallingConv::SPIR_KERNEL;
445}
446
447llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
448 llvm::PointerType *T, QualType QT) const {
449 return llvm::ConstantPointerNull::get(T);
450}
451
452LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
453 const VarDecl *D) const {
 454 assert(!CGM.getLangOpts().OpenCL &&
 455 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
 456 "Address space agnostic languages only");
457 return D ? D->getType().getAddressSpace() : LangAS::Default;
458}
459
460llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
461 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
462 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
463 // Since target may map different address spaces in AST to the same address
464 // space, an address space conversion may end up as a bitcast.
465 if (auto *C = dyn_cast<llvm::Constant>(Src))
466 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
467 // Try to preserve the source's name to make IR more readable.
468 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
469 Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
470}
471
472llvm::Constant *
473TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
474 LangAS SrcAddr, LangAS DestAddr,
475 llvm::Type *DestTy) const {
476 // Since target may map different address spaces in AST to the same address
477 // space, an address space conversion may end up as a bitcast.
478 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
479}
480
481llvm::SyncScope::ID
482TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
483 SyncScope Scope,
484 llvm::AtomicOrdering Ordering,
485 llvm::LLVMContext &Ctx) const {
486 return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
487}
488
489static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
490
491/// isEmptyField - Return true iff a the field is "empty", that is it
492/// is an unnamed bit-field or an (array of) empty record(s).
493static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
494 bool AllowArrays) {
495 if (FD->isUnnamedBitfield())
496 return true;
497
498 QualType FT = FD->getType();
499
500 // Constant arrays of empty records count as empty, strip them off.
501 // Constant arrays of zero length always count as empty.
502 bool WasArray = false;
503 if (AllowArrays)
504 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
505 if (AT->getSize() == 0)
506 return true;
507 FT = AT->getElementType();
508 // The [[no_unique_address]] special case below does not apply to
509 // arrays of C++ empty records, so we need to remember this fact.
510 WasArray = true;
511 }
512
513 const RecordType *RT = FT->getAs<RecordType>();
514 if (!RT)
515 return false;
516
517 // C++ record fields are never empty, at least in the Itanium ABI.
518 //
519 // FIXME: We should use a predicate for whether this behavior is true in the
520 // current ABI.
521 //
522 // The exception to the above rule are fields marked with the
523 // [[no_unique_address]] attribute (since C++20). Those do count as empty
524 // according to the Itanium ABI. The exception applies only to records,
525 // not arrays of records, so we must also check whether we stripped off an
526 // array type above.
527 if (isa<CXXRecordDecl>(RT->getDecl()) &&
528 (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
529 return false;
530
531 return isEmptyRecord(Context, FT, AllowArrays);
532}
533
534/// isEmptyRecord - Return true iff a structure contains only empty
535/// fields. Note that a structure with a flexible array member is not
536/// considered empty.
537static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
538 const RecordType *RT = T->getAs<RecordType>();
539 if (!RT)
540 return false;
541 const RecordDecl *RD = RT->getDecl();
542 if (RD->hasFlexibleArrayMember())
543 return false;
544
545 // If this is a C++ record, check the bases first.
546 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
547 for (const auto &I : CXXRD->bases())
548 if (!isEmptyRecord(Context, I.getType(), true))
549 return false;
550
551 for (const auto *I : RD->fields())
552 if (!isEmptyField(Context, I, AllowArrays))
553 return false;
554 return true;
555}
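// [Illustrative example; not part of the original source] A record such as
// struct S { int : 7; }; contains only an unnamed bit-field, so isEmptyRecord
// returns true for it; adding any named scalar field makes it non-empty.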
556
557/// isSingleElementStruct - Determine if a structure is a "single
558/// element struct", i.e. it has exactly one non-empty field or
559/// exactly one field which is itself a single element
560/// struct. Structures with flexible array members are never
561/// considered single element structs.
562///
563/// \return The field declaration for the single non-empty field, if
564/// it exists.
565static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
566 const RecordType *RT = T->getAs<RecordType>();
567 if (!RT)
568 return nullptr;
569
570 const RecordDecl *RD = RT->getDecl();
571 if (RD->hasFlexibleArrayMember())
572 return nullptr;
573
574 const Type *Found = nullptr;
575
576 // If this is a C++ record, check the bases first.
577 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
578 for (const auto &I : CXXRD->bases()) {
579 // Ignore empty records.
580 if (isEmptyRecord(Context, I.getType(), true))
581 continue;
582
583 // If we already found an element then this isn't a single-element struct.
584 if (Found)
585 return nullptr;
586
587 // If this is non-empty and not a single element struct, the composite
588 // cannot be a single element struct.
589 Found = isSingleElementStruct(I.getType(), Context);
590 if (!Found)
591 return nullptr;
592 }
593 }
594
595 // Check for single element.
596 for (const auto *FD : RD->fields()) {
597 QualType FT = FD->getType();
598
599 // Ignore empty fields.
600 if (isEmptyField(Context, FD, true))
601 continue;
602
603 // If we already found an element then this isn't a single-element
604 // struct.
605 if (Found)
606 return nullptr;
607
608 // Treat single element arrays as the element.
609 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
610 if (AT->getSize().getZExtValue() != 1)
611 break;
612 FT = AT->getElementType();
613 }
614
615 if (!isAggregateTypeForABI(FT)) {
616 Found = FT.getTypePtr();
617 } else {
618 Found = isSingleElementStruct(FT, Context);
619 if (!Found)
620 return nullptr;
621 }
622 }
623
624 // We don't consider a struct a single-element struct if it has
625 // padding beyond the element type.
626 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
627 return nullptr;
628
629 return Found;
630}
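// [Illustrative example; not part of the original source] For a C type such
// as struct P { float f[1]; };, the one-element array is unwrapped and the
// function returns the 'float' type, whereas struct Q { float a, b; }; has
// two non-empty fields and yields nullptr.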
631
632namespace {
633Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
634 const ABIArgInfo &AI) {
635 // This default implementation defers to the llvm backend's va_arg
636 // instruction. It can handle only passing arguments directly
637 // (typically only handled in the backend for primitive types), or
638 // aggregates passed indirectly by pointer (NOTE: if the "byval"
639 // flag has ABI impact in the callee, this implementation cannot
640 // work.)
641
642 // Only a few cases are covered here at the moment -- those needed
643 // by the default abi.
644 llvm::Value *Val;
645
646 if (AI.isIndirect()) {
 647 assert(!AI.getPaddingType() &&
 648 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
 649 assert(
 650 !AI.getIndirectRealign() &&
 651 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
652
653 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
654 CharUnits TyAlignForABI = TyInfo.second;
655
656 llvm::Type *BaseTy =
657 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
658 llvm::Value *Addr =
659 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
660 return Address(Addr, TyAlignForABI);
661 } else {
 662 assert((AI.isDirect() || AI.isExtend()) &&
 663 "Unexpected ArgInfo Kind in generic VAArg emitter!");
664
 665 assert(!AI.getInReg() &&
 666 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
 667 assert(!AI.getPaddingType() &&
 668 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
 669 assert(!AI.getDirectOffset() &&
 670 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
 671 assert(!AI.getCoerceToType() &&
 672 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
673
674 Address Temp = CGF.CreateMemTemp(Ty, "varet");
675 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
676 CGF.Builder.CreateStore(Val, Temp);
677 return Temp;
678 }
679}
680
681/// DefaultABIInfo - The default implementation for ABI specific
682/// details. This implementation provides information which results in
683/// self-consistent and sensible LLVM IR generation, but does not
684/// conform to any particular ABI.
685class DefaultABIInfo : public ABIInfo {
686public:
687 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
688
689 ABIArgInfo classifyReturnType(QualType RetTy) const;
690 ABIArgInfo classifyArgumentType(QualType RetTy) const;
691
692 void computeInfo(CGFunctionInfo &FI) const override {
693 if (!getCXXABI().classifyReturnType(FI))
694 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
695 for (auto &I : FI.arguments())
696 I.info = classifyArgumentType(I.type);
697 }
698
699 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
700 QualType Ty) const override {
701 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
702 }
703};
704
705class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
706public:
707 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
708 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
709};
710
711ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
712 Ty = useFirstFieldIfTransparentUnion(Ty);
713
714 if (isAggregateTypeForABI(Ty)) {
715 // Records with non-trivial destructors/copy-constructors should not be
716 // passed by value.
717 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
718 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
719
720 return getNaturalAlignIndirect(Ty);
721 }
722
723 // Treat an enum type as its underlying type.
724 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
725 Ty = EnumTy->getDecl()->getIntegerType();
726
727 ASTContext &Context = getContext();
728 if (const auto *EIT = Ty->getAs<ExtIntType>())
729 if (EIT->getNumBits() >
730 Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
731 ? Context.Int128Ty
732 : Context.LongLongTy))
733 return getNaturalAlignIndirect(Ty);
734
735 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
736 : ABIArgInfo::getDirect());
737}
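// [Illustrative summary; not part of the original source] Under this default
// classification a 'short' argument is promotable and is extended, a plain
// 'int' is passed direct, and an _ExtInt wider than the largest native
// integer type (e.g. _ExtInt(256)) is passed indirectly.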
738
739ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
740 if (RetTy->isVoidType())
741 return ABIArgInfo::getIgnore();
742
743 if (isAggregateTypeForABI(RetTy))
744 return getNaturalAlignIndirect(RetTy);
745
746 // Treat an enum type as its underlying type.
747 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
748 RetTy = EnumTy->getDecl()->getIntegerType();
749
750 if (const auto *EIT = RetTy->getAs<ExtIntType>())
751 if (EIT->getNumBits() >
752 getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
753 ? getContext().Int128Ty
754 : getContext().LongLongTy))
755 return getNaturalAlignIndirect(RetTy);
756
757 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
758 : ABIArgInfo::getDirect());
759}
760
761//===----------------------------------------------------------------------===//
762// WebAssembly ABI Implementation
763//
764// This is a very simple ABI that relies a lot on DefaultABIInfo.
765//===----------------------------------------------------------------------===//
766
767class WebAssemblyABIInfo final : public SwiftABIInfo {
768public:
769 enum ABIKind {
770 MVP = 0,
771 ExperimentalMV = 1,
772 };
773
774private:
775 DefaultABIInfo defaultInfo;
776 ABIKind Kind;
777
778public:
779 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
780 : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
781
782private:
783 ABIArgInfo classifyReturnType(QualType RetTy) const;
784 ABIArgInfo classifyArgumentType(QualType Ty) const;
785
786 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
787 // non-virtual, but computeInfo and EmitVAArg are virtual, so we
788 // overload them.
789 void computeInfo(CGFunctionInfo &FI) const override {
790 if (!getCXXABI().classifyReturnType(FI))
791 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
792 for (auto &Arg : FI.arguments())
793 Arg.info = classifyArgumentType(Arg.type);
794 }
795
796 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
797 QualType Ty) const override;
798
799 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
800 bool asReturnValue) const override {
801 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
802 }
803
804 bool isSwiftErrorInRegister() const override {
805 return false;
806 }
807};
808
809class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
810public:
811 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
812 WebAssemblyABIInfo::ABIKind K)
813 : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
814
815 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
816 CodeGen::CodeGenModule &CGM) const override {
817 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
818 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
819 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
820 llvm::Function *Fn = cast<llvm::Function>(GV);
821 llvm::AttrBuilder B;
822 B.addAttribute("wasm-import-module", Attr->getImportModule());
823 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
824 }
825 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
826 llvm::Function *Fn = cast<llvm::Function>(GV);
827 llvm::AttrBuilder B;
828 B.addAttribute("wasm-import-name", Attr->getImportName());
829 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
830 }
831 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
832 llvm::Function *Fn = cast<llvm::Function>(GV);
833 llvm::AttrBuilder B;
834 B.addAttribute("wasm-export-name", Attr->getExportName());
835 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
836 }
837 }
838
839 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
840 llvm::Function *Fn = cast<llvm::Function>(GV);
841 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
842 Fn->addFnAttr("no-prototype");
843 }
844 }
845};
846
847/// Classify argument of given type \p Ty.
848ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
849 Ty = useFirstFieldIfTransparentUnion(Ty);
850
851 if (isAggregateTypeForABI(Ty)) {
852 // Records with non-trivial destructors/copy-constructors should not be
853 // passed by value.
854 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
855 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
856 // Ignore empty structs/unions.
857 if (isEmptyRecord(getContext(), Ty, true))
858 return ABIArgInfo::getIgnore();
859 // Lower single-element structs to just pass a regular value. TODO: We
860 // could do reasonable-size multiple-element structs too, using getExpand(),
861 // though watch out for things like bitfields.
862 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
863 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
864 // For the experimental multivalue ABI, fully expand all other aggregates
865 if (Kind == ABIKind::ExperimentalMV) {
866 const RecordType *RT = Ty->getAs<RecordType>();
 867 assert(RT);
868 bool HasBitField = false;
869 for (auto *Field : RT->getDecl()->fields()) {
870 if (Field->isBitField()) {
871 HasBitField = true;
872 break;
873 }
874 }
875 if (!HasBitField)
876 return ABIArgInfo::getExpand();
877 }
878 }
879
880 // Otherwise just do the default thing.
881 return defaultInfo.classifyArgumentType(Ty);
882}
883
884ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
885 if (isAggregateTypeForABI(RetTy)) {
886 // Records with non-trivial destructors/copy-constructors should not be
887 // returned by value.
888 if (!getRecordArgABI(RetTy, getCXXABI())) {
889 // Ignore empty structs/unions.
890 if (isEmptyRecord(getContext(), RetTy, true))
891 return ABIArgInfo::getIgnore();
892 // Lower single-element structs to just return a regular value. TODO: We
893 // could do reasonable-size multiple-element structs too, using
894 // ABIArgInfo::getDirect().
895 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
896 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
897 // For the experimental multivalue ABI, return all other aggregates
898 if (Kind == ABIKind::ExperimentalMV)
899 return ABIArgInfo::getDirect();
900 }
901 }
902
903 // Otherwise just do the default thing.
904 return defaultInfo.classifyReturnType(RetTy);
905}
906
907Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
908 QualType Ty) const {
909 bool IsIndirect = isAggregateTypeForABI(Ty) &&
910 !isEmptyRecord(getContext(), Ty, true) &&
911 !isSingleElementStruct(Ty, getContext());
912 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
913 getContext().getTypeInfoInChars(Ty),
914 CharUnits::fromQuantity(4),
915 /*AllowHigherAlign=*/true);
916}
917
918//===----------------------------------------------------------------------===//
919// le32/PNaCl bitcode ABI Implementation
920//
921// This is a simplified version of the x86_32 ABI. Arguments and return values
922// are always passed on the stack.
923//===----------------------------------------------------------------------===//
924
925class PNaClABIInfo : public ABIInfo {
926 public:
927 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
928
929 ABIArgInfo classifyReturnType(QualType RetTy) const;
930 ABIArgInfo classifyArgumentType(QualType RetTy) const;
931
932 void computeInfo(CGFunctionInfo &FI) const override;
933 Address EmitVAArg(CodeGenFunction &CGF,
934 Address VAListAddr, QualType Ty) const override;
935};
936
937class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
938 public:
939 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
940 : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
941};
942
943void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
944 if (!getCXXABI().classifyReturnType(FI))
945 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
946
947 for (auto &I : FI.arguments())
948 I.info = classifyArgumentType(I.type);
949}
950
951Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
952 QualType Ty) const {
953 // The PNaCL ABI is a bit odd, in that varargs don't use normal
954 // function classification. Structs get passed directly for varargs
955 // functions, through a rewriting transform in
956 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
957 // this target to actually support a va_arg instructions with an
958 // aggregate type, unlike other targets.
959 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
960}
961
962/// Classify argument of given type \p Ty.
963ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
964 if (isAggregateTypeForABI(Ty)) {
965 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
966 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
967 return getNaturalAlignIndirect(Ty);
968 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
969 // Treat an enum type as its underlying type.
970 Ty = EnumTy->getDecl()->getIntegerType();
971 } else if (Ty->isFloatingType()) {
972 // Floating-point types don't go inreg.
973 return ABIArgInfo::getDirect();
974 } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
975 // Treat extended integers as integers if <=64, otherwise pass indirectly.
976 if (EIT->getNumBits() > 64)
977 return getNaturalAlignIndirect(Ty);
978 return ABIArgInfo::getDirect();
979 }
980
981 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
982 : ABIArgInfo::getDirect());
983}
984
985ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
986 if (RetTy->isVoidType())
987 return ABIArgInfo::getIgnore();
988
989 // In the PNaCl ABI we always return records/structures on the stack.
990 if (isAggregateTypeForABI(RetTy))
991 return getNaturalAlignIndirect(RetTy);
992
993 // Treat extended integers as integers if <=64, otherwise pass indirectly.
994 if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
995 if (EIT->getNumBits() > 64)
996 return getNaturalAlignIndirect(RetTy);
997 return ABIArgInfo::getDirect();
998 }
999
1000 // Treat an enum type as its underlying type.
1001 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1002 RetTy = EnumTy->getDecl()->getIntegerType();
1003
1004 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1005 : ABIArgInfo::getDirect());
1006}
1007
1008/// IsX86_MMXType - Return true if this is an MMX type.
1009bool IsX86_MMXType(llvm::Type *IRType) {
1010 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
1011 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
1012 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
1013 IRType->getScalarSizeInBits() != 64;
1014}
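// [Illustrative note; not part of the original source] <2 x i32>, <4 x i16>
// and <8 x i8> pass all three checks above; <1 x i64> is rejected by the
// scalar-size test and <2 x float> by the integer-element test.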
1015
1016static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1017 StringRef Constraint,
1018 llvm::Type* Ty) {
1019 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
1020 .Cases("y", "&y", "^Ym", true)
1021 .Default(false);
1022 if (IsMMXCons && Ty->isVectorTy()) {
1023 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
1024 64) {
1025 // Invalid MMX constraint
1026 return nullptr;
1027 }
1028
1029 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
1030 }
1031
1032 // No operation needed
1033 return Ty;
1034}
1035
1036/// Returns true if this type can be passed in SSE registers with the
1037/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1038static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
1039 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1040 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
1041 if (BT->getKind() == BuiltinType::LongDouble) {
1042 if (&Context.getTargetInfo().getLongDoubleFormat() ==
1043 &llvm::APFloat::x87DoubleExtended())
1044 return false;
1045 }
1046 return true;
1047 }
1048 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
1049 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
1050 // registers specially.
1051 unsigned VecSize = Context.getTypeSize(VT);
1052 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
1053 return true;
1054 }
1055 return false;
1056}
1057
1058/// Returns true if this aggregate is small enough to be passed in SSE registers
1059/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1060static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
1061 return NumMembers <= 4;
1062}
1063
1064/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
1065static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
1066 auto AI = ABIArgInfo::getDirect(T);
1067 AI.setInReg(true);
1068 AI.setCanBeFlattened(false);
1069 return AI;
1070}
1071
1072//===----------------------------------------------------------------------===//
1073// X86-32 ABI Implementation
1074//===----------------------------------------------------------------------===//
1075
1076/// Similar to llvm::CCState, but for Clang.
1077struct CCState {
1078 CCState(CGFunctionInfo &FI)
1079 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1080
1081 llvm::SmallBitVector IsPreassigned;
1082 unsigned CC = CallingConv::CC_C;
1083 unsigned FreeRegs = 0;
1084 unsigned FreeSSERegs = 0;
1085};
1086
1087enum {
1088 // Vectorcall only allows the first 6 parameters to be passed in registers.
1089 VectorcallMaxParamNumAsReg = 6
1090};
1091
1092/// X86_32ABIInfo - The X86-32 ABI information.
1093class X86_32ABIInfo : public SwiftABIInfo {
1094 enum Class {
1095 Integer,
1096 Float
1097 };
1098
1099 static const unsigned MinABIStackAlignInBytes = 4;
1100
1101 bool IsDarwinVectorABI;
1102 bool IsRetSmallStructInRegABI;
1103 bool IsWin32StructABI;
1104 bool IsSoftFloatABI;
1105 bool IsMCUABI;
1106 unsigned DefaultNumRegisterParameters;
1107
1108 static bool isRegisterSize(unsigned Size) {
1109 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1110 }
1111
1112 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1113 // FIXME: Assumes vectorcall is in use.
1114 return isX86VectorTypeForVectorCall(getContext(), Ty);
1115 }
1116
1117 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1118 uint64_t NumMembers) const override {
1119 // FIXME: Assumes vectorcall is in use.
1120 return isX86VectorCallAggregateSmallEnough(NumMembers);
1121 }
1122
1123 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
1124
1125 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1126 /// such that the argument will be passed in memory.
1127 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
1128
1129 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
1130
1131 /// Return the alignment to use for the given type on the stack.
1132 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1133
1134 Class classify(QualType Ty) const;
1135 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
1136 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
1137
1138 /// Updates the number of available free registers, returns
1139 /// true if any registers were allocated.
1140 bool updateFreeRegs(QualType Ty, CCState &State) const;
1141
1142 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1143 bool &NeedsPadding) const;
1144 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1145
1146 bool canExpandIndirectArgument(QualType Ty) const;
1147
1148 /// Rewrite the function info so that all memory arguments use
1149 /// inalloca.
1150 void rewriteWithInAlloca(CGFunctionInfo &FI) const;
1151
1152 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1153 CharUnits &StackOffset, ABIArgInfo &Info,
1154 QualType Type) const;
1155 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
1156
1157public:
1158
1159 void computeInfo(CGFunctionInfo &FI) const override;
1160 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1161 QualType Ty) const override;
1162
1163 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1164 bool RetSmallStructInRegABI, bool Win32StructABI,
1165 unsigned NumRegisterParameters, bool SoftFloatABI)
1166 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1167 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1168 IsWin32StructABI(Win32StructABI),
1169 IsSoftFloatABI(SoftFloatABI),
1170 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1171 DefaultNumRegisterParameters(NumRegisterParameters) {}
1172
1173 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
1174 bool asReturnValue) const override {
1175 // LLVM's x86-32 lowering currently only assigns up to three
1176 // integer registers and three fp registers. Oddly, it'll use up to
1177 // four vector registers for vectors, but those can overlap with the
1178 // scalar registers.
1179 return occupiesMoreThan(CGT, scalars, /*total*/ 3);
1180 }
1181
1182 bool isSwiftErrorInRegister() const override {
1183 // x86-32 lowering does not support passing swifterror in a register.
1184 return false;
1185 }
1186};
1187
1188class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
1189public:
1190 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1191 bool RetSmallStructInRegABI, bool Win32StructABI,
1192 unsigned NumRegisterParameters, bool SoftFloatABI)
1193 : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
1194 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1195 NumRegisterParameters, SoftFloatABI)) {}
1196
1197 static bool isStructReturnInRegABI(
1198 const llvm::Triple &Triple, const CodeGenOptions &Opts);
1199
1200 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1201 CodeGen::CodeGenModule &CGM) const override;
1202
1203 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1204 // Darwin uses different dwarf register numbers for EH.
1205 if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
1206 return 4;
1207 }
1208
1209 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1210 llvm::Value *Address) const override;
1211
1212 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1213 StringRef Constraint,
1214 llvm::Type* Ty) const override {
1215 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1216 }
1217
1218 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
1219 std::string &Constraints,
1220 std::vector<llvm::Type *> &ResultRegTypes,
1221 std::vector<llvm::Type *> &ResultTruncRegTypes,
1222 std::vector<LValue> &ResultRegDests,
1223 std::string &AsmString,
1224 unsigned NumOutputs) const override;
1225
1226 llvm::Constant *
1227 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1228 unsigned Sig = (0xeb << 0) | // jmp rel8
1229 (0x06 << 8) | // .+0x08
1230 ('v' << 16) |
1231 ('2' << 24);
1232 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1233 }
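// Decoding the signature constant above (an editorial illustration, not part
// of the upstream file): Sig == 0x327606eb, which is stored little-endian as
// the bytes EB 06 76 32. Interpreted as code and ASCII that is "jmp .+0x08"
// (EB 06 skips the two bytes of the jump itself plus the next six bytes)
// followed by the characters 'v' and '2', matching the per-byte comments above.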
1234
1235 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1236 return "movl\t%ebp, %ebp"
1237 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1238 }
1239};
1240
1241}
1242
1243/// Rewrite input constraint references after adding some output constraints.
1244/// In the case where there is one output and one input and we add one output,
1245/// we need to replace all operand references greater than or equal to 1:
1246/// mov $0, $1
1247/// mov eax, $1
1248/// The result will be:
1249/// mov $0, $2
1250/// mov eax, $2
1251static void rewriteInputConstraintReferences(unsigned FirstIn,
1252 unsigned NumNewOuts,
1253 std::string &AsmString) {
1254 std::string Buf;
1255 llvm::raw_string_ostream OS(Buf);
1256 size_t Pos = 0;
1257 while (Pos < AsmString.size()) {
1258 size_t DollarStart = AsmString.find('$', Pos);
1259 if (DollarStart == std::string::npos)
1260 DollarStart = AsmString.size();
1261 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1262 if (DollarEnd == std::string::npos)
1263 DollarEnd = AsmString.size();
1264 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1265 Pos = DollarEnd;
1266 size_t NumDollars = DollarEnd - DollarStart;
1267 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1268 // We have an operand reference.
1269 size_t DigitStart = Pos;
1270 if (AsmString[DigitStart] == '{') {
1271 OS << '{';
1272 ++DigitStart;
1273 }
1274 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1275 if (DigitEnd == std::string::npos)
1276 DigitEnd = AsmString.size();
1277 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1278 unsigned OperandIndex;
1279 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1280 if (OperandIndex >= FirstIn)
1281 OperandIndex += NumNewOuts;
1282 OS << OperandIndex;
1283 } else {
1284 OS << OperandStr;
1285 }
1286 Pos = DigitEnd;
1287 }
1288 }
1289 AsmString = std::move(OS.str());
1290}
1291
1292/// Add output constraints for EAX:EDX because they are return registers.
1293void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1294 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1295 std::vector<llvm::Type *> &ResultRegTypes,
1296 std::vector<llvm::Type *> &ResultTruncRegTypes,
1297 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1298 unsigned NumOutputs) const {
1299 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1300
1301 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1302 // larger.
1303 if (!Constraints.empty())
1304 Constraints += ',';
1305 if (RetWidth <= 32) {
1306 Constraints += "={eax}";
1307 ResultRegTypes.push_back(CGF.Int32Ty);
1308 } else {
1309 // Use the 'A' constraint for EAX:EDX.
1310 Constraints += "=A";
1311 ResultRegTypes.push_back(CGF.Int64Ty);
1312 }
1313
1314 // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1315 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1316 ResultTruncRegTypes.push_back(CoerceTy);
1317
1318 // Coerce the integer by bitcasting the return slot pointer.
1319 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
1320 CoerceTy->getPointerTo()));
1321 ResultRegDests.push_back(ReturnSlot);
1322
1323 rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1324}
1325
1326/// shouldReturnTypeInRegister - Determine if the given type should be
1327/// returned in a register (for the Darwin and MCU ABI).
1328bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1329 ASTContext &Context) const {
1330 uint64_t Size = Context.getTypeSize(Ty);
1331
1332 // For i386, type must be register sized.
1333 // For the MCU ABI, it only needs to be <= 8-byte
1334 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1335 return false;
1336
1337 if (Ty->isVectorType()) {
1338 // 64- and 128-bit vectors inside structures are not returned in
1339 // registers.
1340 if (Size == 64 || Size == 128)
1341 return false;
1342
1343 return true;
1344 }
1345
1346 // If this is a builtin, pointer, enum, complex type, member pointer, or
1347 // member function pointer it is ok.
1348 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1349 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1350 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1351 return true;
1352
1353 // Arrays are treated like records.
1354 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1355 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1356
1357 // Otherwise, it must be a record type.
1358 const RecordType *RT = Ty->getAs<RecordType>();
1359 if (!RT) return false;
1360
1361 // FIXME: Traverse bases here too.
1362
1363 // Structure types are passed in register if all fields would be
1364 // passed in a register.
1365 for (const auto *FD : RT->getDecl()->fields()) {
1366 // Empty fields are ignored.
1367 if (isEmptyField(Context, FD, true))
1368 continue;
1369
1370 // Check fields recursively.
1371 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1372 return false;
1373 }
1374 return true;
1375}
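// Worked example for the predicate above (editorial illustration, not part of
// the upstream file): a struct such as
//   struct S { short a; short b; };   // 32 bits, register sized
// is returned in a register because both fields are builtin types that pass
// the recursive check, whereas a struct whose only member is a 64-bit vector
// is rejected by the vector special case near the top of the function.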
1376
1377static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1378 // Treat complex types as the element type.
1379 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1380 Ty = CTy->getElementType();
1381
1382 // Check for a type which we know has a simple scalar argument-passing
1383 // convention without any padding. (We're specifically looking for 32
1384 // and 64-bit integer and integer-equivalents, float, and double.)
1385 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1386 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1387 return false;
1388
1389 uint64_t Size = Context.getTypeSize(Ty);
1390 return Size == 32 || Size == 64;
1391}
1392
1393static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1394 uint64_t &Size) {
1395 for (const auto *FD : RD->fields()) {
1396 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1397 // argument is smaller than 32-bits, expanding the struct will create
1398 // alignment padding.
1399 if (!is32Or64BitBasicType(FD->getType(), Context))
1400 return false;
1401
1402 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
1403 // how to expand them yet, and the predicate for telling if a bitfield still
1404 // counts as "basic" is more complicated than what we were doing previously.
1405 if (FD->isBitField())
1406 return false;
1407
1408 Size += Context.getTypeSize(FD->getType());
1409 }
1410 return true;
1411}
1412
1413static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1414 uint64_t &Size) {
1415 // Don't do this if there are any non-empty bases.
1416 for (const CXXBaseSpecifier &Base : RD->bases()) {
1417 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1418 Size))
1419 return false;
1420 }
1421 if (!addFieldSizes(Context, RD, Size))
1422 return false;
1423 return true;
1424}
1425
1426/// Test whether an argument type which is to be passed indirectly (on the
1427/// stack) would have the equivalent layout if it was expanded into separate
1428/// arguments. If so, we prefer to do the latter to avoid inhibiting
1429/// optimizations.
1430bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1431 // We can only expand structure types.
1432 const RecordType *RT = Ty->getAs<RecordType>();
1433 if (!RT)
1434 return false;
1435 const RecordDecl *RD = RT->getDecl();
1436 uint64_t Size = 0;
1437 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1438 if (!IsWin32StructABI) {
1439 // On non-Windows, we have to conservatively match our old bitcode
1440 // prototypes in order to be ABI-compatible at the bitcode level.
1441 if (!CXXRD->isCLike())
1442 return false;
1443 } else {
1444 // Don't do this for dynamic classes.
1445 if (CXXRD->isDynamicClass())
1446 return false;
1447 }
1448 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1449 return false;
1450 } else {
1451 if (!addFieldSizes(getContext(), RD, Size))
1452 return false;
1453 }
1454
1455 // We can do this if there was no alignment padding.
1456 return Size == getContext().getTypeSize(Ty);
1457}
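// Illustrative examples for the check above (editorial, not part of the
// upstream file): struct { int a; float b; } consists of 32-bit basic fields
// whose sizes sum to the size of the struct, so it can be expanded;
// struct { char c; int i; } cannot, because 'char' is not a 32- or 64-bit
// basic type, and any struct with internal alignment padding fails the final
// size comparison.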
1458
1459ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1460 // If the return value is indirect, then the hidden argument is consuming one
1461 // integer register.
1462 if (State.FreeRegs) {
1463 --State.FreeRegs;
1464 if (!IsMCUABI)
1465 return getNaturalAlignIndirectInReg(RetTy);
1466 }
1467 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1468}
1469
1470ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1471 CCState &State) const {
1472 if (RetTy->isVoidType())
1473 return ABIArgInfo::getIgnore();
1474
1475 const Type *Base = nullptr;
1476 uint64_t NumElts = 0;
1477 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1478 State.CC == llvm::CallingConv::X86_RegCall) &&
1479 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1480 // The LLVM struct type for such an aggregate should lower properly.
1481 return ABIArgInfo::getDirect();
1482 }
1483
1484 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1485 // On Darwin, some vectors are returned in registers.
1486 if (IsDarwinVectorABI) {
1487 uint64_t Size = getContext().getTypeSize(RetTy);
1488
1489 // 128-bit vectors are a special case; they are returned in
1490 // registers and we need to make sure to pick a type the LLVM
1491 // backend will like.
1492 if (Size == 128)
1493 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1494 llvm::Type::getInt64Ty(getVMContext()), 2));
1495
1496 // Always return in register if it fits in a general purpose
1497 // register, or if it is 64 bits and has a single element.
1498 if ((Size == 8 || Size == 16 || Size == 32) ||
1499 (Size == 64 && VT->getNumElements() == 1))
1500 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1501 Size));
1502
1503 return getIndirectReturnResult(RetTy, State);
1504 }
1505
1506 return ABIArgInfo::getDirect();
1507 }
1508
1509 if (isAggregateTypeForABI(RetTy)) {
1510 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1511 // Structures with flexible arrays are always indirect.
1512 if (RT->getDecl()->hasFlexibleArrayMember())
1513 return getIndirectReturnResult(RetTy, State);
1514 }
1515
1516 // If specified, structs and unions are always indirect.
1517 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1518 return getIndirectReturnResult(RetTy, State);
1519
1520 // Ignore empty structs/unions.
1521 if (isEmptyRecord(getContext(), RetTy, true))
1522 return ABIArgInfo::getIgnore();
1523
1524 // Small structures which are register sized are generally returned
1525 // in a register.
1526 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1527 uint64_t Size = getContext().getTypeSize(RetTy);
1528
1529 // As a special-case, if the struct is a "single-element" struct, and
1530 // the field is of type "float" or "double", return it in a
1531 // floating-point register. (MSVC does not apply this special case.)
1532 // We apply a similar transformation for pointer types to improve the
1533 // quality of the generated IR.
1534 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1535 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1536 || SeltTy->hasPointerRepresentation())
1537 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1538
1539 // FIXME: We should be able to narrow this integer in cases with dead
1540 // padding.
1541 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1542 }
1543
1544 return getIndirectReturnResult(RetTy, State);
1545 }
1546
1547 // Treat an enum type as its underlying type.
1548 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1549 RetTy = EnumTy->getDecl()->getIntegerType();
1550
1551 if (const auto *EIT = RetTy->getAs<ExtIntType>())
1552 if (EIT->getNumBits() > 64)
1553 return getIndirectReturnResult(RetTy, State);
1554
1555 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1556 : ABIArgInfo::getDirect());
1557}
1558
1559static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
1560 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1561}
1562
1563static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
1564 const RecordType *RT = Ty->getAs<RecordType>();
1565 if (!RT)
1566 return false;
1567 const RecordDecl *RD = RT->getDecl();
1568
1569 // If this is a C++ record, check the bases first.
1570 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1571 for (const auto &I : CXXRD->bases())
1572 if (!isRecordWithSIMDVectorType(Context, I.getType()))
1573 return false;
1574
1575 for (const auto *i : RD->fields()) {
1576 QualType FT = i->getType();
1577
1578 if (isSIMDVectorType(Context, FT))
1579 return true;
1580
1581 if (isRecordWithSIMDVectorType(Context, FT))
1582 return true;
1583 }
1584
1585 return false;
1586}
1587
1588unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1589 unsigned Align) const {
1590 // Otherwise, if the alignment is less than or equal to the minimum ABI
1591 // alignment, just use the default; the backend will handle this.
1592 if (Align <= MinABIStackAlignInBytes)
1593 return 0; // Use default alignment.
1594
1595 // On non-Darwin, the stack type alignment is always 4.
1596 if (!IsDarwinVectorABI) {
1597 // Set explicit alignment, since we may need to realign the top.
1598 return MinABIStackAlignInBytes;
1599 }
1600
1601 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1602 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
1603 isRecordWithSIMDVectorType(getContext(), Ty)))
1604 return 16;
1605
1606 return MinABIStackAlignInBytes;
1607}
1608
1609ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1610 CCState &State) const {
1611 if (!ByVal) {
1612 if (State.FreeRegs) {
1613 --State.FreeRegs; // Non-byval indirects just use one pointer.
1614 if (!IsMCUABI)
1615 return getNaturalAlignIndirectInReg(Ty);
1616 }
1617 return getNaturalAlignIndirect(Ty, false);
1618 }
1619
1620 // Compute the byval alignment.
1621 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1622 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1623 if (StackAlign == 0)
1624 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1625
1626 // If the stack alignment is less than the type alignment, realign the
1627 // argument.
1628 bool Realign = TypeAlign > StackAlign;
1629 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1630 /*ByVal=*/true, Realign);
1631}
1632
1633X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1634 const Type *T = isSingleElementStruct(Ty, getContext());
1635 if (!T)
1636 T = Ty.getTypePtr();
1637
1638 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1639 BuiltinType::Kind K = BT->getKind();
1640 if (K == BuiltinType::Float || K == BuiltinType::Double)
1641 return Float;
1642 }
1643 return Integer;
1644}
1645
1646bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1647 if (!IsSoftFloatABI) {
1648 Class C = classify(Ty);
1649 if (C == Float)
1650 return false;
1651 }
1652
1653 unsigned Size = getContext().getTypeSize(Ty);
1654 unsigned SizeInRegs = (Size + 31) / 32;
1655
1656 if (SizeInRegs == 0)
1657 return false;
1658
1659 if (!IsMCUABI) {
1660 if (SizeInRegs > State.FreeRegs) {
1661 State.FreeRegs = 0;
1662 return false;
1663 }
1664 } else {
1665 // The MCU psABI allows passing parameters in-reg even if there are
1666 // earlier parameters that are passed on the stack. Also,
1667 // it does not allow passing >8-byte structs in-register,
1668 // even if there are 3 free registers available.
1669 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1670 return false;
1671 }
1672
1673 State.FreeRegs -= SizeInRegs;
1674 return true;
1675}
1676
1677bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1678 bool &InReg,
1679 bool &NeedsPadding) const {
1680 // On Windows, aggregates other than HFAs are never passed in registers, and
1681 // they do not consume register slots. Homogeneous floating-point aggregates
1682 // (HFAs) have already been dealt with at this point.
1683 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1684 return false;
1685
1686 NeedsPadding = false;
1687 InReg = !IsMCUABI;
1688
1689 if (!updateFreeRegs(Ty, State))
1690 return false;
1691
1692 if (IsMCUABI)
1693 return true;
1694
1695 if (State.CC == llvm::CallingConv::X86_FastCall ||
1696 State.CC == llvm::CallingConv::X86_VectorCall ||
1697 State.CC == llvm::CallingConv::X86_RegCall) {
1698 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1699 NeedsPadding = true;
1700
1701 return false;
1702 }
1703
1704 return true;
1705}
1706
1707bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1708 if (!updateFreeRegs(Ty, State))
1709 return false;
1710
1711 if (IsMCUABI)
1712 return false;
1713
1714 if (State.CC == llvm::CallingConv::X86_FastCall ||
1715 State.CC == llvm::CallingConv::X86_VectorCall ||
1716 State.CC == llvm::CallingConv::X86_RegCall) {
1717 if (getContext().getTypeSize(Ty) > 32)
1718 return false;
1719
1720 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1721 Ty->isReferenceType());
1722 }
1723
1724 return true;
1725}
1726
1727void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1728 // Vectorcall on x86 works subtly differently than on x64, so the format is
1729 // a bit different from the x64 version. First, all vector types (not HVAs)
1730 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1731 // This differs from the x64 implementation, where the first 6 by INDEX get
1732 // registers.
1733 // In the second pass over the arguments, HVAs are passed in the remaining
1734 // vector registers if possible, or indirectly by address. The address will be
1735 // passed in ECX/EDX if available. Any other arguments are passed according to
1736 // the usual fastcall rules.
1737 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1738 for (int I = 0, E = Args.size(); I < E; ++I) {
1739 const Type *Base = nullptr;
1740 uint64_t NumElts = 0;
1741 const QualType &Ty = Args[I].type;
1742 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1743 isHomogeneousAggregate(Ty, Base, NumElts)) {
1744 if (State.FreeSSERegs >= NumElts) {
1745 State.FreeSSERegs -= NumElts;
1746 Args[I].info = ABIArgInfo::getDirectInReg();
1747 State.IsPreassigned.set(I);
1748 }
1749 }
1750 }
1751}
1752
1753ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1754 CCState &State) const {
1755 // FIXME: Set alignment on indirect arguments.
1756 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1757 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1758 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1759
1760 Ty = useFirstFieldIfTransparentUnion(Ty);
1761 TypeInfo TI = getContext().getTypeInfo(Ty);
1762
1763 // Check with the C++ ABI first.
1764 const RecordType *RT = Ty->getAs<RecordType>();
1765 if (RT) {
1766 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1767 if (RAA == CGCXXABI::RAA_Indirect) {
1768 return getIndirectResult(Ty, false, State);
1769 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1770 // The field index doesn't matter, we'll fix it up later.
1771 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1772 }
1773 }
1774
1775 // Regcall uses the concept of a homogeneous vector aggregate, similar
1776 // to other targets.
1777 const Type *Base = nullptr;
1778 uint64_t NumElts = 0;
1779 if ((IsRegCall || IsVectorCall) &&
1780 isHomogeneousAggregate(Ty, Base, NumElts)) {
1781 if (State.FreeSSERegs >= NumElts) {
1782 State.FreeSSERegs -= NumElts;
1783
1784 // Vectorcall passes HVAs directly and does not flatten them, but regcall
1785 // does.
1786 if (IsVectorCall)
1787 return getDirectX86Hva();
1788
1789 if (Ty->isBuiltinType() || Ty->isVectorType())
1790 return ABIArgInfo::getDirect();
1791 return ABIArgInfo::getExpand();
1792 }
1793 return getIndirectResult(Ty, /*ByVal=*/false, State);
1794 }
1795
1796 if (isAggregateTypeForABI(Ty)) {
1797 // Structures with flexible arrays are always indirect.
1798 // FIXME: This should not be byval!
1799 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1800 return getIndirectResult(Ty, true, State);
1801
1802 // Ignore empty structs/unions on non-Windows.
1803 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1804 return ABIArgInfo::getIgnore();
1805
1806 llvm::LLVMContext &LLVMContext = getVMContext();
1807 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1808 bool NeedsPadding = false;
1809 bool InReg;
1810 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1811 unsigned SizeInRegs = (TI.Width + 31) / 32;
1812 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1813 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1814 if (InReg)
1815 return ABIArgInfo::getDirectInReg(Result);
1816 else
1817 return ABIArgInfo::getDirect(Result);
1818 }
1819 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1820
1821 // Pass over-aligned aggregates on Windows indirectly. This behavior was
1822 // added in MSVC 2015.
1823 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
1824 return getIndirectResult(Ty, /*ByVal=*/false, State);
1825
1826 // Expand small (<= 128-bit) record types when we know that the stack layout
1827 // of those arguments will match the struct. This is important because the
1828 // LLVM backend isn't smart enough to remove byval, which inhibits many
1829 // optimizations.
1830 // Don't do this for the MCU if there are still free integer registers
1831 // (see X86_64 ABI for full explanation).
1832 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1833 canExpandIndirectArgument(Ty))
1834 return ABIArgInfo::getExpandWithPadding(
1835 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1836
1837 return getIndirectResult(Ty, true, State);
1838 }
1839
1840 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1841 // On Windows, vectors are passed directly if registers are available, or
1842 // indirectly if not. This avoids the need to align argument memory. Pass
1843 // user-defined vector types larger than 512 bits indirectly for simplicity.
1844 if (IsWin32StructABI) {
1845 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1846 --State.FreeSSERegs;
1847 return ABIArgInfo::getDirectInReg();
1848 }
1849 return getIndirectResult(Ty, /*ByVal=*/false, State);
1850 }
1851
1852 // On Darwin, some vectors are passed in memory, we handle this by passing
1853 // it as an i8/i16/i32/i64.
1854 if (IsDarwinVectorABI) {
1855 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
1856 (TI.Width == 64 && VT->getNumElements() == 1))
1857 return ABIArgInfo::getDirect(
1858 llvm::IntegerType::get(getVMContext(), TI.Width));
1859 }
1860
1861 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1862 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1863
1864 return ABIArgInfo::getDirect();
1865 }
1866
1867
1868 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1869 Ty = EnumTy->getDecl()->getIntegerType();
1870
1871 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1872
1873 if (isPromotableIntegerTypeForABI(Ty)) {
1874 if (InReg)
1875 return ABIArgInfo::getExtendInReg(Ty);
1876 return ABIArgInfo::getExtend(Ty);
1877 }
1878
1879 if (const auto * EIT = Ty->getAs<ExtIntType>()) {
1880 if (EIT->getNumBits() <= 64) {
1881 if (InReg)
1882 return ABIArgInfo::getDirectInReg();
1883 return ABIArgInfo::getDirect();
1884 }
1885 return getIndirectResult(Ty, /*ByVal=*/false, State);
1886 }
1887
1888 if (InReg)
1889 return ABIArgInfo::getDirectInReg();
1890 return ABIArgInfo::getDirect();
1891}
1892
1893void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1894 CCState State(FI);
1895 if (IsMCUABI)
1896 State.FreeRegs = 3;
1897 else if (State.CC == llvm::CallingConv::X86_FastCall) {
1898 State.FreeRegs = 2;
1899 State.FreeSSERegs = 3;
1900 } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1901 State.FreeRegs = 2;
1902 State.FreeSSERegs = 6;
1903 } else if (FI.getHasRegParm())
1904 State.FreeRegs = FI.getRegParm();
1905 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1906 State.FreeRegs = 5;
1907 State.FreeSSERegs = 8;
1908 } else if (IsWin32StructABI) {
1909 // Since MSVC 2015, the first three SSE vectors have been passed in
1910 // registers. The rest are passed indirectly.
1911 State.FreeRegs = DefaultNumRegisterParameters;
1912 State.FreeSSERegs = 3;
1913 } else
1914 State.FreeRegs = DefaultNumRegisterParameters;
1915
1916 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1917 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1918 } else if (FI.getReturnInfo().isIndirect()) {
1919 // The C++ ABI is not aware of register usage, so we have to check if the
1920 // return value was sret and put it in a register ourselves if appropriate.
1921 if (State.FreeRegs) {
1922 --State.FreeRegs; // The sret parameter consumes a register.
1923 if (!IsMCUABI)
1924 FI.getReturnInfo().setInReg(true);
1925 }
1926 }
1927
1928 // The chain argument effectively gives us another free register.
1929 if (FI.isChainCall())
1930 ++State.FreeRegs;
1931
1932 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1933 // arguments to XMM registers as available.
1934 if (State.CC == llvm::CallingConv::X86_VectorCall)
1935 runVectorCallFirstPass(FI, State);
1936
1937 bool UsedInAlloca = false;
1938 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1939 for (int I = 0, E = Args.size(); I < E; ++I) {
1940 // Skip arguments that have already been assigned.
1941 if (State.IsPreassigned.test(I))
1942 continue;
1943
1944 Args[I].info = classifyArgumentType(Args[I].type, State);
1945 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
1946 }
1947
1948 // If we needed to use inalloca for any argument, do a second pass and rewrite
1949 // all the memory arguments to use inalloca.
1950 if (UsedInAlloca)
1951 rewriteWithInAlloca(FI);
1952}
1953
1954void
1955X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1956 CharUnits &StackOffset, ABIArgInfo &Info,
1957 QualType Type) const {
1958 // Arguments are always 4-byte-aligned.
1959 CharUnits WordSize = CharUnits::fromQuantity(4);
1960 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
1961
1962 // sret pointers and indirect things will require an extra pointer
1963 // indirection, unless they are byval. Most things are byval, and will not
1964 // require this indirection.
1965 bool IsIndirect = false;
1966 if (Info.isIndirect() && !Info.getIndirectByVal())
1967 IsIndirect = true;
1968 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
1969 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
1970 if (IsIndirect)
1971 LLTy = LLTy->getPointerTo(0);
1972 FrameFields.push_back(LLTy);
1973 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
1974
1975 // Insert padding bytes to respect alignment.
1976 CharUnits FieldEnd = StackOffset;
1977 StackOffset = FieldEnd.alignTo(WordSize);
1978 if (StackOffset != FieldEnd) {
1979 CharUnits NumBytes = StackOffset - FieldEnd;
1980 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1981 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1982 FrameFields.push_back(Ty);
1983 }
1984}
1985
1986static bool isArgInAlloca(const ABIArgInfo &Info) {
1987 // Leave ignored and inreg arguments alone.
1988 switch (Info.getKind()) {
1989 case ABIArgInfo::InAlloca:
1990 return true;
1991 case ABIArgInfo::Ignore:
1992 return false;
1993 case ABIArgInfo::Indirect:
1994 case ABIArgInfo::Direct:
1995 case ABIArgInfo::Extend:
1996 return !Info.getInReg();
1997 case ABIArgInfo::Expand:
1998 case ABIArgInfo::CoerceAndExpand:
1999 // These are aggregate types which are never passed in registers when
2000 // inalloca is involved.
2001 return true;
2002 }
2003 llvm_unreachable("invalid enum")::llvm::llvm_unreachable_internal("invalid enum", "/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/clang/lib/CodeGen/TargetInfo.cpp"
, 2003)
;
2004}
2005
2006void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2007 assert(IsWin32StructABI && "inalloca only supported on win32");
2008
2009 // Build a packed struct type for all of the arguments in memory.
2010 SmallVector<llvm::Type *, 6> FrameFields;
2011
2012 // The stack alignment is always 4.
2013 CharUnits StackAlign = CharUnits::fromQuantity(4);
2014
2015 CharUnits StackOffset;
2016 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2017
2018 // Put 'this' into the struct before 'sret', if necessary.
2019 bool IsThisCall =
2020 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2021 ABIArgInfo &Ret = FI.getReturnInfo();
2022 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2023 isArgInAlloca(I->info)) {
2024 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2025 ++I;
2026 }
2027
2028 // Put the sret parameter into the inalloca struct if it's in memory.
2029 if (Ret.isIndirect() && !Ret.getInReg()) {
2030 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2031 // On Windows, the hidden sret parameter is always returned in eax.
2032 Ret.setInAllocaSRet(IsWin32StructABI);
2033 }
2034
2035 // Skip the 'this' parameter in ecx.
2036 if (IsThisCall)
2037 ++I;
2038
2039 // Put arguments passed in memory into the struct.
2040 for (; I != E; ++I) {
2041 if (isArgInAlloca(I->info))
2042 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2043 }
2044
2045 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2046 /*isPacked=*/true),
2047 StackAlign);
2048}
2049
2050Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2051 Address VAListAddr, QualType Ty) const {
2052
2053 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2054
2055 // x86-32 changes the alignment of certain arguments on the stack.
2056 //
2057 // Just messing with TypeInfo like this works because we never pass
2058 // anything indirectly.
2059 TypeInfo.second = CharUnits::fromQuantity(
2060 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
2061
2062 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2063 TypeInfo, CharUnits::fromQuantity(4),
2064 /*AllowHigherAlign*/ true);
2065}
2066
2067bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2068 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2069 assert(Triple.getArch() == llvm::Triple::x86);
2070
2071 switch (Opts.getStructReturnConvention()) {
2072 case CodeGenOptions::SRCK_Default:
2073 break;
2074 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
2075 return false;
2076 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
2077 return true;
2078 }
2079
2080 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2081 return true;
2082
2083 switch (Triple.getOS()) {
2084 case llvm::Triple::DragonFly:
2085 case llvm::Triple::FreeBSD:
2086 case llvm::Triple::OpenBSD:
2087 case llvm::Triple::Win32:
2088 return true;
2089 default:
2090 return false;
2091 }
2092}
2093
2094void X86_32TargetCodeGenInfo::setTargetAttributes(
2095 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2096 if (GV->isDeclaration())
2097 return;
2098 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2099 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2100 llvm::Function *Fn = cast<llvm::Function>(GV);
2101 Fn->addFnAttr("stackrealign");
2102 }
2103 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2104 llvm::Function *Fn = cast<llvm::Function>(GV);
2105 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2106 }
2107 }
2108}
2109
2110bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2111 CodeGen::CodeGenFunction &CGF,
2112 llvm::Value *Address) const {
2113 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2114
2115 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2116
2117 // 0-7 are the eight integer registers; the order is different
2118 // on Darwin (for EH), but the range is the same.
2119 // 8 is %eip.
2120 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2121
2122 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2123 // 12-16 are st(0..4). Not sure why we stop at 4.
2124 // These have size 16, which is sizeof(long double) on
2125 // platforms with 8-byte alignment for that type.
2126 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2127 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2128
2129 } else {
2130 // 9 is %eflags, which doesn't get a size on Darwin for some
2131 // reason.
2132 Builder.CreateAlignedStore(
2133 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2134 CharUnits::One());
2135
2136 // 11-16 are st(0..5). Not sure why we stop at 5.
2137 // These have size 12, which is sizeof(long double) on
2138 // platforms with 4-byte alignment for that type.
2139 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2140 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2141 }
2142
2143 return false;
2144}
2145
2146//===----------------------------------------------------------------------===//
2147// X86-64 ABI Implementation
2148//===----------------------------------------------------------------------===//
2149
2150
2151namespace {
2152/// The AVX ABI level for X86 targets.
2153enum class X86AVXABILevel {
2154 None,
2155 AVX,
2156 AVX512
2157};
2158
2159 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2160static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2161 switch (AVXLevel) {
2162 case X86AVXABILevel::AVX512:
2163 return 512;
2164 case X86AVXABILevel::AVX:
2165 return 256;
2166 case X86AVXABILevel::None:
2167 return 128;
2168 }
2169 llvm_unreachable("Unknown AVXLevel")::llvm::llvm_unreachable_internal("Unknown AVXLevel", "/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/clang/lib/CodeGen/TargetInfo.cpp"
, 2169)
;
2170}
2171
2172/// X86_64ABIInfo - The X86_64 ABI information.
2173class X86_64ABIInfo : public SwiftABIInfo {
2174 enum Class {
2175 Integer = 0,
2176 SSE,
2177 SSEUp,
2178 X87,
2179 X87Up,
2180 ComplexX87,
2181 NoClass,
2182 Memory
2183 };
2184
2185 /// merge - Implement the X86_64 ABI merging algorithm.
2186 ///
2187 /// Merge an accumulating classification \arg Accum with a field
2188 /// classification \arg Field.
2189 ///
2190 /// \param Accum - The accumulating classification. This should
2191 /// always be either NoClass or the result of a previous merge
2192 /// call. In addition, this should never be Memory (the caller
2193 /// should just return Memory for the aggregate).
2194 static Class merge(Class Accum, Class Field);
2195
2196 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2197 ///
2198 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2199 /// final MEMORY or SSE classes when necessary.
2200 ///
2201 /// \param AggregateSize - The size of the current aggregate in
2202 /// the classification process.
2203 ///
2204 /// \param Lo - The classification for the parts of the type
2205 /// residing in the low word of the containing object.
2206 ///
2207 /// \param Hi - The classification for the parts of the type
2208 /// residing in the higher words of the containing object.
2209 ///
2210 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2211
2212 /// classify - Determine the x86_64 register classes in which the
2213 /// given type T should be passed.
2214 ///
2215 /// \param Lo - The classification for the parts of the type
2216 /// residing in the low word of the containing object.
2217 ///
2218 /// \param Hi - The classification for the parts of the type
2219 /// residing in the high word of the containing object.
2220 ///
2221 /// \param OffsetBase - The bit offset of this type in the
2222 /// containing object. Some parameters are classified different
2223 /// depending on whether they straddle an eightbyte boundary.
2224 ///
2225 /// \param isNamedArg - Whether the argument in question is a "named"
2226 /// argument, as used in AMD64-ABI 3.5.7.
2227 ///
2228 /// If a word is unused its result will be NoClass; if a type should
2229 /// be passed in Memory then at least the classification of \arg Lo
2230 /// will be Memory.
2231 ///
2232 /// The \arg Lo class will be NoClass iff the argument is ignored.
2233 ///
2234 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2235 /// also be ComplexX87.
2236 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2237 bool isNamedArg) const;
2238
2239 llvm::Type *GetByteVectorType(QualType Ty) const;
2240 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2241 unsigned IROffset, QualType SourceTy,
2242 unsigned SourceOffset) const;
2243 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2244 unsigned IROffset, QualType SourceTy,
2245 unsigned SourceOffset) const;
2246
2247 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2248 /// such that the argument will be returned in memory.
2249 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2250
2251 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2252 /// such that the argument will be passed in memory.
2253 ///
2254 /// \param freeIntRegs - The number of free integer registers remaining
2255 /// available.
2256 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2257
2258 ABIArgInfo classifyReturnType(QualType RetTy) const;
2259
2260 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2261 unsigned &neededInt, unsigned &neededSSE,
2262 bool isNamedArg) const;
2263
2264 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2265 unsigned &NeededSSE) const;
2266
2267 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2268 unsigned &NeededSSE) const;
2269
2270 bool IsIllegalVectorType(QualType Ty) const;
2271
2272 /// The 0.98 ABI revision clarified a lot of ambiguities,
2273 /// unfortunately in ways that were not always consistent with
2274 /// certain previous compilers. In particular, platforms which
2275 /// required strict binary compatibility with older versions of GCC
2276 /// may need to exempt themselves.
2277 bool honorsRevision0_98() const {
2278 return !getTarget().getTriple().isOSDarwin();
2279 }
2280
2281 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2282 /// classify it as INTEGER (for compatibility with older clang compilers).
2283 bool classifyIntegerMMXAsSSE() const {
2284 // Clang <= 3.8 did not do this.
2285 if (getContext().getLangOpts().getClangABICompat() <=
2286 LangOptions::ClangABI::Ver3_8)
2287 return false;
2288
2289 const llvm::Triple &Triple = getTarget().getTriple();
2290 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2291 return false;
2292 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2293 return false;
2294 return true;
2295 }
2296
2297 // GCC classifies vectors of __int128 as memory.
2298 bool passInt128VectorsInMem() const {
2299 // Clang <= 9.0 did not do this.
2300 if (getContext().getLangOpts().getClangABICompat() <=
2301 LangOptions::ClangABI::Ver9)
2302 return false;
2303
2304 const llvm::Triple &T = getTarget().getTriple();
2305 return T.isOSLinux() || T.isOSNetBSD();
2306 }
2307
2308 X86AVXABILevel AVXLevel;
2309 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2310 // 64-bit hardware.
2311 bool Has64BitPointers;
2312
2313public:
2314 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2315 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2316 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2317 }
2318
2319 bool isPassedUsingAVXType(QualType type) const {
2320 unsigned neededInt, neededSSE;
2321 // The freeIntRegs argument doesn't matter here.
2322 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2323 /*isNamedArg*/true);
2324 if (info.isDirect()) {
2325 llvm::Type *ty = info.getCoerceToType();
2326 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2327 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2328 }
2329 return false;
2330 }
2331
2332 void computeInfo(CGFunctionInfo &FI) const override;
2333
2334 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2335 QualType Ty) const override;
2336 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2337 QualType Ty) const override;
2338
2339 bool has64BitPointers() const {
2340 return Has64BitPointers;
2341 }
2342
2343 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2344 bool asReturnValue) const override {
2345 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2346 }
2347 bool isSwiftErrorInRegister() const override {
2348 return true;
2349 }
2350};
2351
2352/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2353class WinX86_64ABIInfo : public SwiftABIInfo {
2354public:
2355 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2356 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2357 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2358
2359 void computeInfo(CGFunctionInfo &FI) const override;
2360
2361 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2362 QualType Ty) const override;
2363
2364 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2365 // FIXME: Assumes vectorcall is in use.
2366 return isX86VectorTypeForVectorCall(getContext(), Ty);
2367 }
2368
2369 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2370 uint64_t NumMembers) const override {
2371 // FIXME: Assumes vectorcall is in use.
2372 return isX86VectorCallAggregateSmallEnough(NumMembers);
2373 }
2374
2375 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2376 bool asReturnValue) const override {
2377 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2378 }
2379
2380 bool isSwiftErrorInRegister() const override {
2381 return true;
2382 }
2383
2384private:
2385 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2386 bool IsVectorCall, bool IsRegCall) const;
2387 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2388 const ABIArgInfo &current) const;
2389 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2390 bool IsVectorCall, bool IsRegCall) const;
2391
2392 X86AVXABILevel AVXLevel;
2393
2394 bool IsMingw64;
2395};
2396
2397class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2398public:
2399 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2400 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2401
2402 const X86_64ABIInfo &getABIInfo() const {
2403 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2404 }
2405
2406 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2407 /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
2408 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2409
2410 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2411 return 7;
2412 }
2413
2414 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2415 llvm::Value *Address) const override {
2416 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2417
2418 // 0-15 are the 16 integer registers.
2419 // 16 is %rip.
2420 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2421 return false;
2422 }
2423
2424 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2425 StringRef Constraint,
2426 llvm::Type* Ty) const override {
2427 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2428 }
2429
2430 bool isNoProtoCallVariadic(const CallArgList &args,
2431 const FunctionNoProtoType *fnType) const override {
2432 // The default CC on x86-64 sets %al to the number of SSE
2433 // registers used, and GCC sets this when calling an unprototyped
2434 // function, so we override the default behavior. However, don't do
2435 // that when AVX types are involved: the ABI explicitly states it is
2436 // undefined, and it doesn't work in practice because of how the ABI
2437 // defines varargs anyway.
2438 if (fnType->getCallConv() == CC_C) {
2439 bool HasAVXType = false;
2440 for (CallArgList::const_iterator
2441 it = args.begin(), ie = args.end(); it != ie; ++it) {
2442 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2443 HasAVXType = true;
2444 break;
2445 }
2446 }
2447
2448 if (!HasAVXType)
2449 return true;
2450 }
2451
2452 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2453 }
2454
2455 llvm::Constant *
2456 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2457 unsigned Sig = (0xeb << 0) | // jmp rel8
2458 (0x06 << 8) | // .+0x08
2459 ('v' << 16) |
2460 ('2' << 24);
2461 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2462 }
2463
2464 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2465 CodeGen::CodeGenModule &CGM) const override {
2466 if (GV->isDeclaration())
2467 return;
2468 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2469 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2470 llvm::Function *Fn = cast<llvm::Function>(GV);
2471 Fn->addFnAttr("stackrealign");
2472 }
2473 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2474 llvm::Function *Fn = cast<llvm::Function>(GV);
2475 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2476 }
2477 }
2478 }
2479
2480 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2481 const FunctionDecl *Caller,
2482 const FunctionDecl *Callee,
2483 const CallArgList &Args) const override;
2484};
2485
2486static void initFeatureMaps(const ASTContext &Ctx,
2487 llvm::StringMap<bool> &CallerMap,
2488 const FunctionDecl *Caller,
2489 llvm::StringMap<bool> &CalleeMap,
2490 const FunctionDecl *Callee) {
2491 if (CalleeMap.empty() && CallerMap.empty()) {
2492 // The caller is potentially nullptr in the case where the call isn't in a
2493 // function. In this case, getFunctionFeatureMap ensures we just get
2494 // the TU-level setting (since it cannot be modified by 'target').
2495 Ctx.getFunctionFeatureMap(CallerMap, Caller);
2496 Ctx.getFunctionFeatureMap(CalleeMap, Callee);
2497 }
2498}
2499
2500static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2501 SourceLocation CallLoc,
2502 const llvm::StringMap<bool> &CallerMap,
2503 const llvm::StringMap<bool> &CalleeMap,
2504 QualType Ty, StringRef Feature,
2505 bool IsArgument) {
2506 bool CallerHasFeat = CallerMap.lookup(Feature);
2507 bool CalleeHasFeat = CalleeMap.lookup(Feature);
2508 if (!CallerHasFeat && !CalleeHasFeat)
2509 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2510 << IsArgument << Ty << Feature;
2511
2512 // Mixing calling conventions here is very clearly an error.
2513 if (!CallerHasFeat || !CalleeHasFeat)
2514 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2515 << IsArgument << Ty << Feature;
2516
2517 // Else, both caller and callee have the required feature, so there is no need
2518 // to diagnose.
2519 return false;
2520}
2521
2522static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2523 SourceLocation CallLoc,
2524 const llvm::StringMap<bool> &CallerMap,
2525 const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2526 bool IsArgument) {
2527 uint64_t Size = Ctx.getTypeSize(Ty);
2528 if (Size > 256)
2529 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2530 "avx512f", IsArgument);
2531
2532 if (Size > 128)
2533 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2534 IsArgument);
2535
2536 return false;
2537}
2538
2539void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2540 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2541 const FunctionDecl *Callee, const CallArgList &Args) const {
2542 llvm::StringMap<bool> CallerMap;
2543 llvm::StringMap<bool> CalleeMap;
2544 unsigned ArgIndex = 0;
2545
2546 // We need to loop through the actual call arguments rather than the
2547 // function's parameters, in case this is a variadic call.
2548 for (const CallArg &Arg : Args) {
2549 // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
2550 // additionally changes how vectors >256 in size are passed. Like GCC, we
2551 // warn when a function is called with an argument where this will change.
2552 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2553 // the caller and callee features are mismatched.
2554 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2555 // change its ABI with attribute-target after this call.
2556 if (Arg.getType()->isVectorType() &&
2557 CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2558 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2559 QualType Ty = Arg.getType();
2560 // The CallArg seems to have desugared the type already, so for clearer
2561 // diagnostics, replace it with the type in the FunctionDecl if possible.
2562 if (ArgIndex < Callee->getNumParams())
2563 Ty = Callee->getParamDecl(ArgIndex)->getType();
2564
2565 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2566 CalleeMap, Ty, /*IsArgument*/ true))
2567 return;
2568 }
2569 ++ArgIndex;
2570 }
2571
2572 // Check return always, as we don't have a good way of knowing in codegen
2573 // whether this value is used, tail-called, etc.
2574 if (Callee->getReturnType()->isVectorType() &&
2575 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2576 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2577 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2578 CalleeMap, Callee->getReturnType(),
2579 /*IsArgument*/ false);
2580 }
2581}
2582
2583static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2584 // If the argument does not end in .lib, automatically add the suffix.
2585 // If the argument contains a space, enclose it in quotes.
2586 // This matches the behavior of MSVC.
2587 bool Quote = (Lib.find(" ") != StringRef::npos);
2588 std::string ArgStr = Quote ? "\"" : "";
2589 ArgStr += Lib;
2590 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
2591 ArgStr += ".lib";
2592 ArgStr += Quote ? "\"" : "";
2593 return ArgStr;
2594}
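// For example (editorial illustration, not part of the upstream file):
// qualifyWindowsLibrary("my lib") produces "\"my lib.lib\"" -- the name is
// quoted because it contains a space, and ".lib" is appended because no
// library suffix was present.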
2595
2596class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2597public:
2598 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2599 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2600 unsigned NumRegisterParameters)
2601 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2602 Win32StructABI, NumRegisterParameters, false) {}
2603
2604 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2605 CodeGen::CodeGenModule &CGM) const override;
2606
2607 void getDependentLibraryOption(llvm::StringRef Lib,
2608 llvm::SmallString<24> &Opt) const override {
2609 Opt = "/DEFAULTLIB:";
2610 Opt += qualifyWindowsLibrary(Lib);
2611 }
2612
2613 void getDetectMismatchOption(llvm::StringRef Name,
2614 llvm::StringRef Value,
2615 llvm::SmallString<32> &Opt) const override {
2616 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2617 }
2618};
2619
2620static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2621 CodeGen::CodeGenModule &CGM) {
2622 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2623
2624 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2625 Fn->addFnAttr("stack-probe-size",
2626 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2627 if (CGM.getCodeGenOpts().NoStackArgProbe)
2628 Fn->addFnAttr("no-stack-arg-probe");
2629 }
2630}
2631
2632void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2633 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2634 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2635 if (GV->isDeclaration())
2636 return;
2637 addStackProbeTargetAttributes(D, GV, CGM);
2638}
2639
2640class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2641public:
2642 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2643 X86AVXABILevel AVXLevel)
2644 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
2645
2646 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2647 CodeGen::CodeGenModule &CGM) const override;
2648
2649 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2650 return 7;
2651 }
2652
2653 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2654 llvm::Value *Address) const override {
2655 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2656
2657 // 0-15 are the 16 integer registers.
2658 // 16 is %rip.
2659 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2660 return false;
2661 }
2662
2663 void getDependentLibraryOption(llvm::StringRef Lib,
2664 llvm::SmallString<24> &Opt) const override {
2665 Opt = "/DEFAULTLIB:";
2666 Opt += qualifyWindowsLibrary(Lib);
2667 }
2668
2669 void getDetectMismatchOption(llvm::StringRef Name,
2670 llvm::StringRef Value,
2671 llvm::SmallString<32> &Opt) const override {
2672 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2673 }
2674};
2675
2676void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2677 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2678 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2679 if (GV->isDeclaration())
2680 return;
2681 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2682 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2683 llvm::Function *Fn = cast<llvm::Function>(GV);
2684 Fn->addFnAttr("stackrealign");
2685 }
2686 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2687 llvm::Function *Fn = cast<llvm::Function>(GV);
2688 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2689 }
2690 }
2691
2692 addStackProbeTargetAttributes(D, GV, CGM);
2693}
2694}
2695
2696void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2697 Class &Hi) const {
2698 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2699 //
2700 // (a) If one of the classes is Memory, the whole argument is passed in
2701 // memory.
2702 //
2703 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2704 // memory.
2705 //
2706 // (c) If the size of the aggregate exceeds two eightbytes and the first
2707 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2708 // argument is passed in memory. NOTE: This is necessary to keep the
2709 // ABI working for processors that don't support the __m256 type.
2710 //
2711 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2712 //
2713 // Some of these are enforced by the merging logic. Others can arise
2714 // only with unions; for example:
2715 // union { _Complex double; unsigned; }
2716 //
2717 // Note that clauses (b) and (c) were added in 0.98.
2718 //
2719 if (Hi == Memory)
2720 Lo = Memory;
2721 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2722 Lo = Memory;
2723 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2724 Lo = Memory;
2725 if (Hi == SSEUp && Lo != SSE)
2726 Hi = SSE;
2727}
2728
2729X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2730 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2731 // classified recursively so that always two fields are
2732 // considered. The resulting class is calculated according to
2733 // the classes of the fields in the eightbyte:
2734 //
2735 // (a) If both classes are equal, this is the resulting class.
2736 //
2737 // (b) If one of the classes is NO_CLASS, the resulting class is
2738 // the other class.
2739 //
2740 // (c) If one of the classes is MEMORY, the result is the MEMORY
2741 // class.
2742 //
2743 // (d) If one of the classes is INTEGER, the result is the
2744 // INTEGER.
2745 //
2746 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2747 // MEMORY is used as class.
2748 //
2749 // (f) Otherwise class SSE is used.
2750
2751 // Accum should never be memory (we should have returned) or
2752 // ComplexX87 (because this cannot be passed in a structure).
2753  assert((Accum != Memory && Accum != ComplexX87) &&
2754         "Invalid accumulated classification during merge.");
2755 if (Accum == Field || Field == NoClass)
2756 return Accum;
2757 if (Field == Memory)
2758 return Memory;
2759 if (Accum == NoClass)
2760 return Field;
2761 if (Accum == Integer || Field == Integer)
2762 return Integer;
2763 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2764 Accum == X87 || Accum == X87Up)
2765 return Memory;
2766 return SSE;
2767}
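// --- Illustrative sketch, not part of TargetInfo.cpp ---
// A minimal standalone copy of the merge lattice above, applied to the single
// eightbyte of `union { double d; unsigned u; }`: the double field classifies
// as SSE, the unsigned field as INTEGER, and merging them yields INTEGER, so
// the union is passed in a general-purpose register.
#include <cassert>

namespace sketch {
enum Class { NoClass, Integer, SSE, SSEUp, X87, X87Up, ComplexX87, Memory };

Class merge(Class Accum, Class Field) {
  if (Accum == Field || Field == NoClass) return Accum;     // rules (a), (b)
  if (Field == Memory) return Memory;                       // rule (c)
  if (Accum == NoClass) return Field;                       // rule (b)
  if (Accum == Integer || Field == Integer) return Integer; // rule (d)
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;                                          // rule (e)
  return SSE;                                               // rule (f)
}
} // namespace sketch

int main() {
  sketch::Class Lo = sketch::NoClass;
  Lo = sketch::merge(Lo, sketch::SSE);     // field `double d`
  Lo = sketch::merge(Lo, sketch::Integer); // field `unsigned u`
  assert(Lo == sketch::Integer);           // the whole eightbyte is INTEGER
  return 0;
}
// --- end sketch ---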
2768
2769void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2770 Class &Lo, Class &Hi, bool isNamedArg) const {
2771 // FIXME: This code can be simplified by introducing a simple value class for
2772 // Class pairs with appropriate constructor methods for the various
2773 // situations.
2774
2775 // FIXME: Some of the split computations are wrong; unaligned vectors
2776 // shouldn't be passed in registers for example, so there is no chance they
2777 // can straddle an eightbyte. Verify & simplify.
2778
2779 Lo = Hi = NoClass;
2780
2781 Class &Current = OffsetBase < 64 ? Lo : Hi;
2782 Current = Memory;
2783
2784 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2785 BuiltinType::Kind k = BT->getKind();
2786
2787 if (k == BuiltinType::Void) {
2788 Current = NoClass;
2789 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2790 Lo = Integer;
2791 Hi = Integer;
2792 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2793 Current = Integer;
2794 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2795 Current = SSE;
2796 } else if (k == BuiltinType::LongDouble) {
2797 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2798 if (LDF == &llvm::APFloat::IEEEquad()) {
2799 Lo = SSE;
2800 Hi = SSEUp;
2801 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2802 Lo = X87;
2803 Hi = X87Up;
2804 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2805 Current = SSE;
2806 } else
2807       llvm_unreachable("unexpected long double representation!");
2808 }
2809 // FIXME: _Decimal32 and _Decimal64 are SSE.
2810 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2811 return;
2812 }
2813
2814 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2815 // Classify the underlying integer type.
2816 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2817 return;
2818 }
2819
2820 if (Ty->hasPointerRepresentation()) {
2821 Current = Integer;
2822 return;
2823 }
2824
2825 if (Ty->isMemberPointerType()) {
2826 if (Ty->isMemberFunctionPointerType()) {
2827 if (Has64BitPointers) {
2828 // If Has64BitPointers, this is an {i64, i64}, so classify both
2829 // Lo and Hi now.
2830 Lo = Hi = Integer;
2831 } else {
2832 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2833 // straddles an eightbyte boundary, Hi should be classified as well.
2834 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2835 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2836 if (EB_FuncPtr != EB_ThisAdj) {
2837 Lo = Hi = Integer;
2838 } else {
2839 Current = Integer;
2840 }
2841 }
2842 } else {
2843 Current = Integer;
2844 }
2845 return;
2846 }
2847
2848 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2849 uint64_t Size = getContext().getTypeSize(VT);
2850 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2851 // gcc passes the following as integer:
2852 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2853 // 2 bytes - <2 x char>, <1 x short>
2854 // 1 byte - <1 x char>
2855 Current = Integer;
2856
2857 // If this type crosses an eightbyte boundary, it should be
2858 // split.
2859 uint64_t EB_Lo = (OffsetBase) / 64;
2860 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2861 if (EB_Lo != EB_Hi)
2862 Hi = Lo;
2863 } else if (Size == 64) {
2864 QualType ElementType = VT->getElementType();
2865
2866 // gcc passes <1 x double> in memory. :(
2867 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2868 return;
2869
2870 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2871 // pass them as integer. For platforms where clang is the de facto
2872 // platform compiler, we must continue to use integer.
2873 if (!classifyIntegerMMXAsSSE() &&
2874 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2875 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2876 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2877 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2878 Current = Integer;
2879 else
2880 Current = SSE;
2881
2882 // If this type crosses an eightbyte boundary, it should be
2883 // split.
2884 if (OffsetBase && OffsetBase != 64)
2885 Hi = Lo;
2886 } else if (Size == 128 ||
2887 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2888 QualType ElementType = VT->getElementType();
2889
2890 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2891 if (passInt128VectorsInMem() && Size != 128 &&
2892 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2893 ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2894 return;
2895
2896 // Arguments of 256-bits are split into four eightbyte chunks. The
2897 // least significant one belongs to class SSE and all the others to class
2898 // SSEUP. The original Lo and Hi design considers that types can't be
2899 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2900 // This design isn't correct for 256-bits, but since there are no cases
2901 // where the upper parts would need to be inspected, avoid adding
2902 // complexity and just consider Hi to match the 64-256 part.
2903 //
2904 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2905 // registers if they are "named", i.e. not part of the "..." of a
2906 // variadic function.
2907 //
2908 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2909 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2910 Lo = SSE;
2911 Hi = SSEUp;
2912 }
2913 return;
2914 }
2915
2916 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2917 QualType ET = getContext().getCanonicalType(CT->getElementType());
2918
2919 uint64_t Size = getContext().getTypeSize(Ty);
2920 if (ET->isIntegralOrEnumerationType()) {
2921 if (Size <= 64)
2922 Current = Integer;
2923 else if (Size <= 128)
2924 Lo = Hi = Integer;
2925 } else if (ET == getContext().FloatTy) {
2926 Current = SSE;
2927 } else if (ET == getContext().DoubleTy) {
2928 Lo = Hi = SSE;
2929 } else if (ET == getContext().LongDoubleTy) {
2930 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2931 if (LDF == &llvm::APFloat::IEEEquad())
2932 Current = Memory;
2933 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2934 Current = ComplexX87;
2935 else if (LDF == &llvm::APFloat::IEEEdouble())
2936 Lo = Hi = SSE;
2937 else
2938       llvm_unreachable("unexpected long double representation!");
2939 }
2940
2941 // If this complex type crosses an eightbyte boundary then it
2942 // should be split.
2943 uint64_t EB_Real = (OffsetBase) / 64;
2944 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2945 if (Hi == NoClass && EB_Real != EB_Imag)
2946 Hi = Lo;
2947
2948 return;
2949 }
2950
2951 if (const auto *EITy = Ty->getAs<ExtIntType>()) {
2952 if (EITy->getNumBits() <= 64)
2953 Current = Integer;
2954 else if (EITy->getNumBits() <= 128)
2955 Lo = Hi = Integer;
2956 // Larger values need to get passed in memory.
2957 return;
2958 }
2959
2960 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2961 // Arrays are treated like structures.
2962
2963 uint64_t Size = getContext().getTypeSize(Ty);
2964
2965 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2966 // than eight eightbytes, ..., it has class MEMORY.
2967 if (Size > 512)
2968 return;
2969
2970 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2971 // fields, it has class MEMORY.
2972 //
2973 // Only need to check alignment of array base.
2974 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2975 return;
2976
2977 // Otherwise implement simplified merge. We could be smarter about
2978 // this, but it isn't worth it and would be harder to verify.
2979 Current = NoClass;
2980 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2981 uint64_t ArraySize = AT->getSize().getZExtValue();
2982
2983 // The only case a 256-bit wide vector could be used is when the array
2984 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2985 // to work for sizes wider than 128, early check and fallback to memory.
2986 //
2987 if (Size > 128 &&
2988 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2989 return;
2990
2991 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2992 Class FieldLo, FieldHi;
2993 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2994 Lo = merge(Lo, FieldLo);
2995 Hi = merge(Hi, FieldHi);
2996 if (Lo == Memory || Hi == Memory)
2997 break;
2998 }
2999
3000 postMerge(Size, Lo, Hi);
3001    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
3002 return;
3003 }
3004
3005 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3006 uint64_t Size = getContext().getTypeSize(Ty);
3007
3008 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
3009 // than eight eightbytes, ..., it has class MEMORY.
3010 if (Size > 512)
3011 return;
3012
3013 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
3014 // copy constructor or a non-trivial destructor, it is passed by invisible
3015 // reference.
3016 if (getRecordArgABI(RT, getCXXABI()))
3017 return;
3018
3019 const RecordDecl *RD = RT->getDecl();
3020
3021 // Assume variable sized types are passed in memory.
3022 if (RD->hasFlexibleArrayMember())
3023 return;
3024
3025 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3026
3027 // Reset Lo class, this will be recomputed.
3028 Current = NoClass;
3029
3030 // If this is a C++ record, classify the bases first.
3031 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3032 for (const auto &I : CXXRD->bases()) {
3033        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3034               "Unexpected base class!");
3035 const auto *Base =
3036 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3037
3038 // Classify this field.
3039 //
3040 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
3041 // single eightbyte, each is classified separately. Each eightbyte gets
3042 // initialized to class NO_CLASS.
3043 Class FieldLo, FieldHi;
3044 uint64_t Offset =
3045 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
3046 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
3047 Lo = merge(Lo, FieldLo);
3048 Hi = merge(Hi, FieldHi);
3049 if (Lo == Memory || Hi == Memory) {
3050 postMerge(Size, Lo, Hi);
3051 return;
3052 }
3053 }
3054 }
3055
3056 // Classify the fields one at a time, merging the results.
3057 unsigned idx = 0;
3058 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3059 i != e; ++i, ++idx) {
3060 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3061 bool BitField = i->isBitField();
3062
3063 // Ignore padding bit-fields.
3064 if (BitField && i->isUnnamedBitfield())
3065 continue;
3066
3067 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
3068 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
3069 //
3070 // The only case a 256-bit wide vector could be used is when the struct
3071 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
3072 // to work for sizes wider than 128, early check and fallback to memory.
3073 //
3074 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
3075 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
3076 Lo = Memory;
3077 postMerge(Size, Lo, Hi);
3078 return;
3079 }
3080 // Note, skip this test for bit-fields, see below.
3081 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
3082 Lo = Memory;
3083 postMerge(Size, Lo, Hi);
3084 return;
3085 }
3086
3087 // Classify this field.
3088 //
3089 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
3090 // exceeds a single eightbyte, each is classified
3091 // separately. Each eightbyte gets initialized to class
3092 // NO_CLASS.
3093 Class FieldLo, FieldHi;
3094
3095 // Bit-fields require special handling, they do not force the
3096 // structure to be passed in memory even if unaligned, and
3097 // therefore they can straddle an eightbyte.
3098 if (BitField) {
3099        assert(!i->isUnnamedBitfield());
3100 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3101 uint64_t Size = i->getBitWidthValue(getContext());
3102
3103 uint64_t EB_Lo = Offset / 64;
3104 uint64_t EB_Hi = (Offset + Size - 1) / 64;
3105
3106 if (EB_Lo) {
3107          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
3108 FieldLo = NoClass;
3109 FieldHi = Integer;
3110 } else {
3111 FieldLo = Integer;
3112 FieldHi = EB_Hi ? Integer : NoClass;
3113 }
3114 } else
3115 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
3116 Lo = merge(Lo, FieldLo);
3117 Hi = merge(Hi, FieldHi);
3118 if (Lo == Memory || Hi == Memory)
3119 break;
3120 }
3121
3122 postMerge(Size, Lo, Hi);
3123 }
3124}
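// --- Illustrative sketch, not part of TargetInfo.cpp ---
// Classification results produced by classify() for a few concrete types.
struct CharLong { char c; long l; };  // Lo = Integer, Hi = Integer: %rdi + %rsi
struct IntFloat { int i; float f; };  // one eightbyte, merge(Integer, SSE) = Integer: %rdi
struct Floats4  { float v[4]; };      // Lo = SSE, Hi = SSE: %xmm0 + %xmm1

void p(CharLong a);
void q(IntFloat a);
void r(Floats4 a);
// --- end sketch ---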
3125
3126ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
3127 // If this is a scalar LLVM value then assume LLVM will pass it in the right
3128 // place naturally.
3129 if (!isAggregateTypeForABI(Ty)) {
3130 // Treat an enum type as its underlying type.
3131 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3132 Ty = EnumTy->getDecl()->getIntegerType();
3133
3134 if (Ty->isExtIntType())
3135 return getNaturalAlignIndirect(Ty);
3136
3137 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3138 : ABIArgInfo::getDirect());
3139 }
3140
3141 return getNaturalAlignIndirect(Ty);
3142}
3143
3144bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
3145 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
3146 uint64_t Size = getContext().getTypeSize(VecTy);
3147 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
3148 if (Size <= 64 || Size > LargestVector)
3149 return true;
3150 QualType EltTy = VecTy->getElementType();
3151 if (passInt128VectorsInMem() &&
3152 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
3153 EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
3154 return true;
3155 }
3156
3157 return false;
3158}
3159
3160ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
3161 unsigned freeIntRegs) const {
3162 // If this is a scalar LLVM value then assume LLVM will pass it in the right
3163 // place naturally.
3164 //
3165 // This assumption is optimistic, as there could be free registers available
3166 // when we need to pass this argument in memory, and LLVM could try to pass
3167 // the argument in the free register. This does not seem to happen currently,
3168 // but this code would be much safer if we could mark the argument with
3169 // 'onstack'. See PR12193.
3170 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
3171 !Ty->isExtIntType()) {
3172 // Treat an enum type as its underlying type.
3173 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3174 Ty = EnumTy->getDecl()->getIntegerType();
3175
3176 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3177 : ABIArgInfo::getDirect());
3178 }
3179
3180 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3181 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3182
3183 // Compute the byval alignment. We specify the alignment of the byval in all
3184 // cases so that the mid-level optimizer knows the alignment of the byval.
3185 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
3186
3187 // Attempt to avoid passing indirect results using byval when possible. This
3188 // is important for good codegen.
3189 //
3190 // We do this by coercing the value into a scalar type which the backend can
3191 // handle naturally (i.e., without using byval).
3192 //
3193 // For simplicity, we currently only do this when we have exhausted all of the
3194 // free integer registers. Doing this when there are free integer registers
3195 // would require more care, as we would have to ensure that the coerced value
3196 // did not claim the unused register. That would require either reordering the
3197 // arguments to the function (so that any subsequent inreg values came first),
3198 // or only doing this optimization when there were no following arguments that
3199 // might be inreg.
3200 //
3201 // We currently expect it to be rare (particularly in well written code) for
3202 // arguments to be passed on the stack when there are still free integer
3203 // registers available (this would typically imply large structs being passed
3204 // by value), so this seems like a fair tradeoff for now.
3205 //
3206 // We can revisit this if the backend grows support for 'onstack' parameter
3207 // attributes. See PR12193.
3208 if (freeIntRegs == 0) {
3209 uint64_t Size = getContext().getTypeSize(Ty);
3210
3211 // If this type fits in an eightbyte, coerce it into the matching integral
3212 // type, which will end up on the stack (with alignment 8).
3213 if (Align == 8 && Size <= 64)
3214 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3215 Size));
3216 }
3217
3218 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
3219}
3220
3221/// The ABI specifies that a value should be passed in a full vector XMM/YMM
3222/// register. Pick an LLVM IR type that will be passed as a vector register.
3223llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3224 // Wrapper structs/arrays that only contain vectors are passed just like
3225 // vectors; strip them off if present.
3226 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3227 Ty = QualType(InnerTy, 0);
3228
3229 llvm::Type *IRType = CGT.ConvertType(Ty);
3230 if (isa<llvm::VectorType>(IRType)) {
3231 // Don't pass vXi128 vectors in their native type, the backend can't
3232 // legalize them.
3233 if (passInt128VectorsInMem() &&
3234 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3235 // Use a vXi64 vector.
3236 uint64_t Size = getContext().getTypeSize(Ty);
3237 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3238 Size / 64);
3239 }
3240
3241 return IRType;
3242 }
3243
3244 if (IRType->getTypeID() == llvm::Type::FP128TyID)
3245 return IRType;
3246
3247 // We couldn't find the preferred IR vector type for 'Ty'.
3248 uint64_t Size = getContext().getTypeSize(Ty);
3249  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3250
3251
3252  // Return an LLVM IR vector type based on the size of 'Ty'.
3253 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3254 Size / 64);
3255}
3256
3257/// BitsContainNoUserData - Return true if the specified [start,end) bit range
3258/// is known to either be off the end of the specified type or being in
3259/// alignment padding. The user type specified is known to be at most 128 bits
3260/// in size, and have passed through X86_64ABIInfo::classify with a successful
3261/// classification that put one of the two halves in the INTEGER class.
3262///
3263/// It is conservatively correct to return false.
3264static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
3265 unsigned EndBit, ASTContext &Context) {
3266 // If the bytes being queried are off the end of the type, there is no user
3267 // data hiding here. This handles analysis of builtins, vectors and other
3268 // types that don't contain interesting padding.
3269 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
3270 if (TySize <= StartBit)
3271 return true;
3272
3273 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3274 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3275 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3276
3277 // Check each element to see if the element overlaps with the queried range.
3278 for (unsigned i = 0; i != NumElts; ++i) {
3279      // If the element is after the span we care about, then we're done.
3280 unsigned EltOffset = i*EltSize;
3281 if (EltOffset >= EndBit) break;
3282
3283 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3284 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
3285 EndBit-EltOffset, Context))
3286 return false;
3287 }
3288 // If it overlaps no elements, then it is safe to process as padding.
3289 return true;
3290 }
3291
3292 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3293 const RecordDecl *RD = RT->getDecl();
3294 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3295
3296 // If this is a C++ record, check the bases first.
3297 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3298 for (const auto &I : CXXRD->bases()) {
3299        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3300               "Unexpected base class!");
3301 const auto *Base =
3302 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3303
3304 // If the base is after the span we care about, ignore it.
3305 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3306 if (BaseOffset >= EndBit) continue;
3307
3308 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3309 if (!BitsContainNoUserData(I.getType(), BaseStart,
3310 EndBit-BaseOffset, Context))
3311 return false;
3312 }
3313 }
3314
3315 // Verify that no field has data that overlaps the region of interest. Yes
3316 // this could be sped up a lot by being smarter about queried fields,
3317 // however we're only looking at structs up to 16 bytes, so we don't care
3318 // much.
3319 unsigned idx = 0;
3320 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3321 i != e; ++i, ++idx) {
3322 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3323
3324 // If we found a field after the region we care about, then we're done.
3325 if (FieldOffset >= EndBit) break;
3326
3327 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3328 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3329 Context))
3330 return false;
3331 }
3332
3333 // If nothing in this record overlapped the area of interest, then we're
3334 // clean.
3335 return true;
3336 }
3337
3338 return false;
3339}
3340
3341/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3342/// float member at the specified offset. For example, {int,{float}} has a
3343/// float at offset 4. It is conservatively correct for this routine to return
3344/// false.
3345static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3346 const llvm::DataLayout &TD) {
3347 // Base case if we find a float.
3348 if (IROffset == 0 && IRType->isFloatTy())
3349 return true;
3350
3351 // If this is a struct, recurse into the field at the specified offset.
3352 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3353 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3354 unsigned Elt = SL->getElementContainingOffset(IROffset);
3355 IROffset -= SL->getElementOffset(Elt);
3356 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3357 }
3358
3359 // If this is an array, recurse into the field at the specified offset.
3360 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3361 llvm::Type *EltTy = ATy->getElementType();
3362 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3363 IROffset -= IROffset/EltSize*EltSize;
3364 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3365 }
3366
3367 return false;
3368}
3369
3370
3371/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3372/// low 8 bytes of an XMM register, corresponding to the SSE class.
3373llvm::Type *X86_64ABIInfo::
3374GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3375 QualType SourceTy, unsigned SourceOffset) const {
3376  // The only three choices we have are double, <2 x float>, or float. We
3377  // pass as float if the last 4 bytes are just padding. This happens for
3378 // structs that contain 3 floats.
3379 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3380 SourceOffset*8+64, getContext()))
3381 return llvm::Type::getFloatTy(getVMContext());
3382
3383 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3384 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3385 // case.
3386 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3387 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3388 return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
3389 2);
3390
3391 return llvm::Type::getDoubleTy(getVMContext());
3392}
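// --- Illustrative sketch, not part of TargetInfo.cpp ---
// The three possible results of GetSSETypeAtOffset, seen at the source level.
struct F2 { float x, y; };        // eightbyte lowered as <2 x float> in %xmm0
struct F3 { float x, y, z; };     // lowered as { <2 x float>, float }: %xmm0 + %xmm1
struct DL { double d; long l; };  // first eightbyte lowered as double in %xmm0

void a(F2 v);
void b(F3 v);
void c(DL v);
// --- end sketch ---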
3393
3394
3395/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3396/// an 8-byte GPR. This means that we either have a scalar or we are talking
3397/// about the high or low part of an up-to-16-byte struct. This routine picks
3398/// the best LLVM IR type to represent this, which may be i64 or may be anything
3399/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3400/// etc).
3401///
3402/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3403/// the source type. IROffset is an offset in bytes into the LLVM IR type that
3404/// the 8-byte value references. PrefType may be null.
3405///
3406/// SourceTy is the source-level type for the entire argument. SourceOffset is
3407/// an offset into this that we're processing (which is always either 0 or 8).
3408///
3409llvm::Type *X86_64ABIInfo::
3410GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3411 QualType SourceTy, unsigned SourceOffset) const {
3412 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3413 // returning an 8-byte unit starting with it. See if we can safely use it.
3414 if (IROffset == 0) {
3415 // Pointers and int64's always fill the 8-byte unit.
3416 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3417 IRType->isIntegerTy(64))
3418 return IRType;
3419
3420 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3421 // goodness in the source type is just tail padding. This is allowed to
3422 // kick in for struct {double,int} on the int, but not on
3423 // struct{double,int,int} because we wouldn't return the second int. We
3424 // have to do this analysis on the source type because we can't depend on
3425 // unions being lowered a specific way etc.
3426 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3427 IRType->isIntegerTy(32) ||
3428 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3429 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3430 cast<llvm::IntegerType>(IRType)->getBitWidth();
3431
3432 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3433 SourceOffset*8+64, getContext()))
3434 return IRType;
3435 }
3436 }
3437
3438 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3439 // If this is a struct, recurse into the field at the specified offset.
3440 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3441 if (IROffset < SL->getSizeInBytes()) {
3442 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3443 IROffset -= SL->getElementOffset(FieldIdx);
3444
3445 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3446 SourceTy, SourceOffset);
3447 }
3448 }
3449
3450 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3451 llvm::Type *EltTy = ATy->getElementType();
3452 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3453 unsigned EltOffset = IROffset/EltSize*EltSize;
3454 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3455 SourceOffset);
3456 }
3457
3458 // Okay, we don't have any better idea of what to pass, so we pass this in an
3459 // integer register that isn't too big to fit the rest of the struct.
3460 unsigned TySizeInBytes =
3461 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3462
3463  assert(TySizeInBytes != SourceOffset && "Empty field?");
3464
3465 // It is always safe to classify this as an integer type up to i64 that
3466 // isn't larger than the structure.
3467 return llvm::IntegerType::get(getVMContext(),
3468 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3469}
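// --- Illustrative sketch, not part of TargetInfo.cpp ---
// The tail-padding analysis described above, at the source level. The trailing
// comments name the IR type chosen for the second (INTEGER) eightbyte.
struct DInt  { double d; int i; };      // second eightbyte lowered as i32
struct DInt2 { double d; int i, j; };   // second eightbyte lowered as i64
struct Ptrs  { void *p; void *q; };     // each eightbyte keeps its pointer type
// --- end sketch ---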
3470
3471
3472/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3473/// be used as elements of a two register pair to pass or return, return a
3474/// first class aggregate to represent them. For example, if the low part of
3475/// a by-value argument should be passed as i32* and the high part as float,
3476/// return {i32*, float}.
3477static llvm::Type *
3478GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3479 const llvm::DataLayout &TD) {
3480  // In order to correctly satisfy the ABI, we need the high part to start
3481 // at offset 8. If the high and low parts we inferred are both 4-byte types
3482 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3483 // the second element at offset 8. Check for this:
3484 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3485 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3486 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3487  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3488
3489 // To handle this, we have to increase the size of the low part so that the
3490 // second element will start at an 8 byte offset. We can't increase the size
3491 // of the second element because it might make us access off the end of the
3492 // struct.
3493 if (HiStart != 8) {
3494 // There are usually two sorts of types the ABI generation code can produce
3495 // for the low part of a pair that aren't 8 bytes in size: float or
3496 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3497 // NaCl).
3498 // Promote these to a larger type.
3499 if (Lo->isFloatTy())
3500 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3501 else {
3502      assert((Lo->isIntegerTy() || Lo->isPointerTy())
3503             && "Invalid/unknown lo type");
3504 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3505 }
3506 }
3507
3508 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3509
3510 // Verify that the second element is at an 8-byte offset.
3511  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3512         "Invalid x86-64 argument pair!");
3513 return Result;
3514}
3515
3516ABIArgInfo X86_64ABIInfo::
3517classifyReturnType(QualType RetTy) const {
3518 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3519 // classification algorithm.
3520 X86_64ABIInfo::Class Lo, Hi;
3521 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3522
3523 // Check some invariants.
3524  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3525  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3526
3527 llvm::Type *ResType = nullptr;
3528 switch (Lo) {
3529 case NoClass:
3530 if (Hi == NoClass)
3531 return ABIArgInfo::getIgnore();
3532 // If the low part is just padding, it takes no register, leave ResType
3533 // null.
3534    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3535           "Unknown missing lo part");
3536 break;
3537
3538 case SSEUp:
3539 case X87Up:
3540    llvm_unreachable("Invalid classification for lo word.");
3541
3542 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3543 // hidden argument.
3544 case Memory:
3545 return getIndirectReturnResult(RetTy);
3546
3547 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3548 // available register of the sequence %rax, %rdx is used.
3549 case Integer:
3550 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3551
3552 // If we have a sign or zero extended integer, make sure to return Extend
3553 // so that the parameter gets the right LLVM IR attributes.
3554 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3555 // Treat an enum type as its underlying type.
3556 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3557 RetTy = EnumTy->getDecl()->getIntegerType();
3558
3559 if (RetTy->isIntegralOrEnumerationType() &&
3560 isPromotableIntegerTypeForABI(RetTy))
3561 return ABIArgInfo::getExtend(RetTy);
3562 }
3563 break;
3564
3565 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3566 // available SSE register of the sequence %xmm0, %xmm1 is used.
3567 case SSE:
3568 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3569 break;
3570
3571 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3572 // returned on the X87 stack in %st0 as 80-bit x87 number.
3573 case X87:
3574 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3575 break;
3576
3577 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3578 // part of the value is returned in %st0 and the imaginary part in
3579 // %st1.
3580 case ComplexX87:
3581    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3582 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3583 llvm::Type::getX86_FP80Ty(getVMContext()));
3584 break;
3585 }
3586
3587 llvm::Type *HighPart = nullptr;
3588 switch (Hi) {
3589 // Memory was handled previously and X87 should
3590 // never occur as a hi class.
3591 case Memory:
3592 case X87:
3593    llvm_unreachable("Invalid classification for hi word.");
3594
3595 case ComplexX87: // Previously handled.
3596 case NoClass:
3597 break;
3598
3599 case Integer:
3600 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3601 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3602 return ABIArgInfo::getDirect(HighPart, 8);
3603 break;
3604 case SSE:
3605 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3606 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3607 return ABIArgInfo::getDirect(HighPart, 8);
3608 break;
3609
3610 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3611  // is passed in the next available eightbyte chunk of the last used
3612 // vector register.
3613 //
3614 // SSEUP should always be preceded by SSE, just widen.
3615 case SSEUp:
3616    assert(Lo == SSE && "Unexpected SSEUp classification.");
3617 ResType = GetByteVectorType(RetTy);
3618 break;
3619
3620 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3621 // returned together with the previous X87 value in %st0.
3622 case X87Up:
3623 // If X87Up is preceded by X87, we don't need to do
3624 // anything. However, in some cases with unions it may not be
3625 // preceded by X87. In such situations we follow gcc and pass the
3626 // extra bits in an SSE reg.
3627 if (Lo != X87) {
3628 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3629 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3630 return ABIArgInfo::getDirect(HighPart, 8);
3631 }
3632 break;
3633 }
3634
3635 // If a high part was specified, merge it together with the low part. It is
3636 // known to pass in the high eightbyte of the result. We do this by forming a
3637 // first class struct aggregate with the high and low part: {low, high}
3638 if (HighPart)
3639 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3640
3641 return ABIArgInfo::getDirect(ResType);
3642}
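// --- Illustrative sketch, not part of TargetInfo.cpp ---
// Return conventions chosen by classifyReturnType for some familiar types
// (the _Complex type is accepted in C++ as a GNU/Clang extension).
struct Big   { long a, b, c; };     // MEMORY: returned via hidden sret pointer in %rdi
struct Small { long a; double b; }; // returned in %rax (Integer) + %xmm0 (SSE)

long double     retX87();   // X87: returned in %st(0)
_Complex double retCD();    // SSE + SSE: real in %xmm0, imaginary in %xmm1
Big             retBig();
Small           retSmall();
// --- end sketch ---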
3643
3644ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3645 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3646 bool isNamedArg)
3647 const
3648{
3649 Ty = useFirstFieldIfTransparentUnion(Ty);
3650
3651 X86_64ABIInfo::Class Lo, Hi;
3652 classify(Ty, 0, Lo, Hi, isNamedArg);
3653
3654 // Check some invariants.
3655 // FIXME: Enforce these by construction.
3656  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3657  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3658
3659 neededInt = 0;
3660 neededSSE = 0;
3661 llvm::Type *ResType = nullptr;
3662 switch (Lo) {
3663 case NoClass:
3664 if (Hi == NoClass)
3665 return ABIArgInfo::getIgnore();
3666 // If the low part is just padding, it takes no register, leave ResType
3667 // null.
3668    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3669           "Unknown missing lo part");
3670 break;
3671
3672 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3673 // on the stack.
3674 case Memory:
3675
3676 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3677 // COMPLEX_X87, it is passed in memory.
3678 case X87:
3679 case ComplexX87:
3680 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3681 ++neededInt;
3682 return getIndirectResult(Ty, freeIntRegs);
3683
3684 case SSEUp:
3685 case X87Up:
3686    llvm_unreachable("Invalid classification for lo word.");
3687
3688 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3689 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3690 // and %r9 is used.
3691 case Integer:
3692 ++neededInt;
3693
3694 // Pick an 8-byte type based on the preferred type.
3695 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3696
3697 // If we have a sign or zero extended integer, make sure to return Extend
3698 // so that the parameter gets the right LLVM IR attributes.
3699 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3700 // Treat an enum type as its underlying type.
3701 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3702 Ty = EnumTy->getDecl()->getIntegerType();
3703
3704 if (Ty->isIntegralOrEnumerationType() &&
3705 isPromotableIntegerTypeForABI(Ty))
3706 return ABIArgInfo::getExtend(Ty);
3707 }
3708
3709 break;
3710
3711 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3712 // available SSE register is used, the registers are taken in the
3713 // order from %xmm0 to %xmm7.
3714 case SSE: {
3715 llvm::Type *IRType = CGT.ConvertType(Ty);
3716 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3717 ++neededSSE;
3718 break;
3719 }
3720 }
3721
3722 llvm::Type *HighPart = nullptr;
3723 switch (Hi) {
3724 // Memory was handled previously, ComplexX87 and X87 should
3725 // never occur as hi classes, and X87Up must be preceded by X87,
3726 // which is passed in memory.
3727 case Memory:
3728 case X87:
3729 case ComplexX87:
3730    llvm_unreachable("Invalid classification for hi word.");
3731
3732 case NoClass: break;
3733
3734 case Integer:
3735 ++neededInt;
3736 // Pick an 8-byte type based on the preferred type.
3737 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3738
3739 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3740 return ABIArgInfo::getDirect(HighPart, 8);
3741 break;
3742
3743 // X87Up generally doesn't occur here (long double is passed in
3744 // memory), except in situations involving unions.
3745 case X87Up:
3746 case SSE:
3747 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3748
3749 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3750 return ABIArgInfo::getDirect(HighPart, 8);
3751
3752 ++neededSSE;
3753 break;
3754
3755 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3756 // eightbyte is passed in the upper half of the last used SSE
3757 // register. This only happens when 128-bit vectors are passed.
3758 case SSEUp:
3759    assert(Lo == SSE && "Unexpected SSEUp classification");
3760 ResType = GetByteVectorType(Ty);
3761 break;
3762 }
3763
3764 // If a high part was specified, merge it together with the low part. It is
3765 // known to pass in the high eightbyte of the result. We do this by forming a
3766 // first class struct aggregate with the high and low part: {low, high}
3767 if (HighPart)
3768 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3769
3770 return ABIArgInfo::getDirect(ResType);
3771}
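// --- Illustrative sketch, not part of TargetInfo.cpp ---
// The neededInt/neededSSE counts reported back to computeInfo for each
// parameter below, together with the registers that end up being used.
struct LongDouble { long l; double d; }; // neededInt = 1, neededSSE = 1
struct ThreeLongs { long a, b, c; };     // MEMORY: classified indirect, counts stay 0

void f(long i,        // neededInt = 1                 -> %rdi
       double d,      // neededSSE = 1                 -> %xmm0
       LongDouble x,  // neededInt = 1, neededSSE = 1  -> %rsi + %xmm1
       ThreeLongs y); // passed on the stack
// --- end sketch ---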
3772
3773ABIArgInfo
3774X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3775 unsigned &NeededSSE) const {
3776 auto RT = Ty->getAs<RecordType>();
3777  assert(RT && "classifyRegCallStructType only valid with struct types");
3778
3779 if (RT->getDecl()->hasFlexibleArrayMember())
3780 return getIndirectReturnResult(Ty);
3781
3782 // Sum up bases
3783 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3784 if (CXXRD->isDynamicClass()) {
3785 NeededInt = NeededSSE = 0;
3786 return getIndirectReturnResult(Ty);
3787 }
3788
3789 for (const auto &I : CXXRD->bases())
3790 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3791 .isIndirect()) {
3792 NeededInt = NeededSSE = 0;
3793 return getIndirectReturnResult(Ty);
3794 }
3795 }
3796
3797 // Sum up members
3798 for (const auto *FD : RT->getDecl()->fields()) {
3799 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3800 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3801 .isIndirect()) {
3802 NeededInt = NeededSSE = 0;
3803 return getIndirectReturnResult(Ty);
3804 }
3805 } else {
3806 unsigned LocalNeededInt, LocalNeededSSE;
3807      if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3808 LocalNeededSSE, true)
3809 .isIndirect()) {
3810 NeededInt = NeededSSE = 0;
3811 return getIndirectReturnResult(Ty);
3812 }
3813 NeededInt += LocalNeededInt;
3814 NeededSSE += LocalNeededSSE;
3815 }
3816 }
3817
3818 return ABIArgInfo::getDirect();
3819}
3820
3821ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3822 unsigned &NeededInt,
3823 unsigned &NeededSSE) const {
3824
3825 NeededInt = 0;
3826 NeededSSE = 0;
3827
3828 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3829}
3830
3831void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3832
3833 const unsigned CallingConv = FI.getCallingConvention();
3834 // It is possible to force Win64 calling convention on any x86_64 target by
3835 // using __attribute__((ms_abi)). In such case to correctly emit Win64
3836 // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
3837 if (CallingConv == llvm::CallingConv::Win64) {
3838 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
3839 Win64ABIInfo.computeInfo(FI);
3840 return;
3841 }
3842
3843 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3844
3845 // Keep track of the number of assigned registers.
3846 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3847 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3848 unsigned NeededInt, NeededSSE;
3849
3850 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3851 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3852 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3853 FI.getReturnInfo() =
3854 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3855 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3856 FreeIntRegs -= NeededInt;
3857 FreeSSERegs -= NeededSSE;
3858 } else {
3859 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3860 }
3861 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
3862 getContext().getCanonicalType(FI.getReturnType()
3863 ->getAs<ComplexType>()
3864 ->getElementType()) ==
3865 getContext().LongDoubleTy)
3866      // A complex long double is passed in memory when the regcall
3867      // calling convention is used.
3868 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3869 else
3870 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3871 }
3872
3873 // If the return value is indirect, then the hidden argument is consuming one
3874 // integer register.
3875 if (FI.getReturnInfo().isIndirect())
3876 --FreeIntRegs;
3877
3878 // The chain argument effectively gives us another free register.
3879 if (FI.isChainCall())
3880 ++FreeIntRegs;
3881
3882 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3883 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3884 // get assigned (in left-to-right order) for passing as follows...
3885 unsigned ArgNo = 0;
3886 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3887 it != ie; ++it, ++ArgNo) {
3888 bool IsNamedArg = ArgNo < NumRequiredArgs;
3889
3890 if (IsRegCall && it->type->isStructureOrClassType())
3891 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3892 else
3893 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3894 NeededSSE, IsNamedArg);
3895
3896 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3897 // eightbyte of an argument, the whole argument is passed on the
3898 // stack. If registers have already been assigned for some
3899 // eightbytes of such an argument, the assignments get reverted.
3900 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3901 FreeIntRegs -= NeededInt;
3902 FreeSSERegs -= NeededSSE;
3903 } else {
3904 it->info = getIndirectResult(it->type, FreeIntRegs);
3905 }
3906 }
3907}
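// --- Illustrative sketch, not part of TargetInfo.cpp ---
// The "all or nothing" rule computeInfo applies per AMD64-ABI 3.2.3p3: after
// six integer arguments the GPRs are exhausted, so a struct needing one GPR
// and one XMM is pushed entirely onto the stack, even though XMM registers
// are still free.
struct LongDouble { long l; double d; };

void g(long a, long b, long c, long d, long e, long f, // %rdi ... %r9
       LongDouble s); // neededInt = 1 > FreeIntRegs = 0 -> whole struct passed in memory
// --- end sketch ---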
3908
3909static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3910 Address VAListAddr, QualType Ty) {
3911 Address overflow_arg_area_p =
3912 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
3913 llvm::Value *overflow_arg_area =
3914 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3915
3916 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3917 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3918 // It isn't stated explicitly in the standard, but in practice we use
3919 // alignment greater than 16 where necessary.
3920 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3921 if (Align > CharUnits::fromQuantity(8)) {
3922 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3923 Align);
3924 }
3925
3926 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3927 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3928 llvm::Value *Res =
3929 CGF.Builder.CreateBitCast(overflow_arg_area,
3930 llvm::PointerType::getUnqual(LTy));
3931
3932 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3933 // l->overflow_arg_area + sizeof(type).
3934 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3935 // an 8 byte boundary.
3936
3937 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3938 llvm::Value *Offset =
3939 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3940 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3941 "overflow_arg_area.next");
3942 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3943
3944 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3945 return Address(Res, Align);
3946}
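// Worked example (not part of TargetInfo.cpp) of the arithmetic above for a
// hypothetical 12-byte argument type: SizeInBytes = (96 + 7) / 8 = 12, and the
// stored increment is (12 + 7) & ~7 = 16, so overflow_arg_area stays 8-byte
// aligned as required by AMD64-ABI 3.5.7p5 Step 10.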
3947
3948Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3949 QualType Ty) const {
3950 // Assume that va_list type is correct; should be pointer to LLVM type:
3951 // struct {
3952 // i32 gp_offset;
3953 // i32 fp_offset;
3954 // i8* overflow_arg_area;
3955 // i8* reg_save_area;
3956 // };
3957 unsigned neededInt, neededSSE;
3958
3959 Ty = getContext().getCanonicalType(Ty);
3960 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3961 /*isNamedArg*/false);
3962
3963 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3964 // in the registers. If not go to step 7.
3965 if (!neededInt && !neededSSE)
3966 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3967
3968 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3969 // general purpose registers needed to pass type and num_fp to hold
3970 // the number of floating point registers needed.
3971
3972 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3973 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3974 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3975 //
3976 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
3977 // register save space.
3978
3979 llvm::Value *InRegs = nullptr;
3980 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3981 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3982 if (neededInt) {
3983 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3984 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3985 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3986 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3987 }
3988
3989 if (neededSSE) {
3990 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
3991 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3992 llvm::Value *FitsInFP =
3993 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3994 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3995 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3996 }
3997
3998 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3999 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4000 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4001 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4002
4003 // Emit code to load the value if it was passed in registers.
4004
4005 CGF.EmitBlock(InRegBlock);
4006
4007 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4008 // an offset of l->gp_offset and/or l->fp_offset. This may require
4009 // copying to a temporary location in case the parameter is passed
4010 // in different register classes or requires an alignment greater
4011 // than 8 for general purpose registers and 16 for XMM registers.
4012 //
4013 // FIXME: This really results in shameful code when we end up needing to
4014 // collect arguments from different places; often what should result in a
4015 // simple assembling of a structure from scattered addresses has many more
4016 // loads than necessary. Can we clean this up?
4017 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4018 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
4019 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
4020
4021 Address RegAddr = Address::invalid();
4022 if (neededInt && neededSSE) {
4023 // FIXME: Cleanup.
4024 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4025 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
4026 Address Tmp = CGF.CreateMemTemp(Ty);
4027 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4028 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4029 llvm::Type *TyLo = ST->getElementType(0);
4030 llvm::Type *TyHi = ST->getElementType(1);
4031 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4032        "Unexpected ABI info for mixed regs");
4033 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4034 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4035 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
4036 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
4037 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4038 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4039
4040 // Copy the first element.
4041 // FIXME: Our choice of alignment here and below is probably pessimistic.
4042 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
4043 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4044 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
4045 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4046
4047 // Copy the second element.
4048 V = CGF.Builder.CreateAlignedLoad(
4049 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4050 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
4051 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4052
4053 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4054 } else if (neededInt) {
4055 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
4056 CharUnits::fromQuantity(8));
4057 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4058
4059 // Copy to a temporary if necessary to ensure the appropriate alignment.
4060 std::pair<CharUnits, CharUnits> SizeAlign =
4061 getContext().getTypeInfoInChars(Ty);
4062 uint64_t TySize = SizeAlign.first.getQuantity();
4063 CharUnits TyAlign = SizeAlign.second;
4064
4065 // Copy into a temporary if the type is more aligned than the
4066 // register save area.
4067 if (TyAlign.getQuantity() > 8) {
4068 Address Tmp = CGF.CreateMemTemp(Ty);
4069 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
4070 RegAddr = Tmp;
4071 }
4072
4073 } else if (neededSSE == 1) {
4074 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4075 CharUnits::fromQuantity(16));
4076 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4077 } else {
4078 assert(neededSSE == 2 && "Invalid number of needed registers!");
4079 // SSE registers are spaced 16 bytes apart in the register save
4080 // area, so we need to collect the two eightbytes together.
4081 // The ABI isn't explicit about this, but it seems reasonable
4082 // to assume that the slots are 16-byte aligned, since the stack is
4083 // naturally 16-byte aligned and the prologue is expected to store
4084 // all the SSE registers to the RSA.
4085 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4086 CharUnits::fromQuantity(16));
4087 Address RegAddrHi =
4088 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
4089 CharUnits::fromQuantity(16));
4090 llvm::Type *ST = AI.canHaveCoerceToType()
4091 ? AI.getCoerceToType()
4092 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
4093 llvm::Value *V;
4094 Address Tmp = CGF.CreateMemTemp(Ty);
4095 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4096 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4097 RegAddrLo, ST->getStructElementType(0)));
4098 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4099 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4100 RegAddrHi, ST->getStructElementType(1)));
4101 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4102
4103 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4104 }
4105
4106 // AMD64-ABI 3.5.7p5: Step 5. Set:
4107 // l->gp_offset = l->gp_offset + num_gp * 8
4108 // l->fp_offset = l->fp_offset + num_fp * 16.
4109 if (neededInt) {
4110 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
4111 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
4112 gp_offset_p);
4113 }
4114 if (neededSSE) {
4115 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
4116 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
4117 fp_offset_p);
4118 }
4119 CGF.EmitBranch(ContBlock);
4120
4121 // Emit code to load the value if it was passed in memory.
4122
4123 CGF.EmitBlock(InMemBlock);
4124 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4125
4126 // Return the appropriate result.
4127
4128 CGF.EmitBlock(ContBlock);
4129 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
4130 "vaarg.addr");
4131 return ResAddr;
4132}
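// Illustrative sketch (not part of TargetInfo.cpp): the kind of C source whose
// va_arg lowering the function above emits.  Names are hypothetical.
#include <stdarg.h>
static double sum_mixed(int count, ...) {
  va_list ap;
  va_start(ap, count);
  double total = 0;
  for (int i = 0; i < count; ++i) {
    total += va_arg(ap, int);     // vaarg.in_reg while gp_offset <= 48 - 8
    total += va_arg(ap, double);  // vaarg.in_reg while fp_offset <= 176 - 16
  }
  va_end(ap);
  return total;
}
// Once either offset check fails, the vaarg.in_mem path above takes over and
// the value is read from overflow_arg_area instead.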
4133
4134Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4135 QualType Ty) const {
4136 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
4137 CGF.getContext().getTypeInfoInChars(Ty),
4138 CharUnits::fromQuantity(8),
4139 /*allowHigherAlign*/ false);
4140}
4141
4142ABIArgInfo
4143WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
4144 const ABIArgInfo &current) const {
4145 // Assumes vectorCall calling convention.
4146 const Type *Base = nullptr;
4147 uint64_t NumElts = 0;
4148
4149 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
4150 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
4151 FreeSSERegs -= NumElts;
4152 return getDirectX86Hva();
4153 }
4154 return current;
4155}
4156
4157ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
4158 bool IsReturnType, bool IsVectorCall,
4159 bool IsRegCall) const {
4160
4161 if (Ty->isVoidType())
4162 return ABIArgInfo::getIgnore();
4163
4164 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4165 Ty = EnumTy->getDecl()->getIntegerType();
4166
4167 TypeInfo Info = getContext().getTypeInfo(Ty);
4168 uint64_t Width = Info.Width;
4169 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
4170
4171 const RecordType *RT = Ty->getAs<RecordType>();
4172 if (RT) {
4173 if (!IsReturnType) {
4174 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
4175 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4176 }
4177
4178 if (RT->getDecl()->hasFlexibleArrayMember())
4179 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4180
4181 }
4182
4183 const Type *Base = nullptr;
4184 uint64_t NumElts = 0;
4185 // vectorcall adds the concept of a homogeneous vector aggregate, similar
4186 // to other targets.
4187 if ((IsVectorCall || IsRegCall) &&
4188 isHomogeneousAggregate(Ty, Base, NumElts)) {
4189 if (IsRegCall) {
4190 if (FreeSSERegs >= NumElts) {
4191 FreeSSERegs -= NumElts;
4192 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
4193 return ABIArgInfo::getDirect();
4194 return ABIArgInfo::getExpand();
4195 }
4196 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4197 } else if (IsVectorCall) {
4198 if (FreeSSERegs >= NumElts &&
4199 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
4200 FreeSSERegs -= NumElts;
4201 return ABIArgInfo::getDirect();
4202 } else if (IsReturnType) {
4203 return ABIArgInfo::getExpand();
4204 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
4205 // HVAs are delayed and reclassified in the 2nd step.
4206 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4207 }
4208 }
4209 }
4210
4211 if (Ty->isMemberPointerType()) {
4212 // If the member pointer is represented by an LLVM int or ptr, pass it
4213 // directly.
4214 llvm::Type *LLTy = CGT.ConvertType(Ty);
4215 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
4216 return ABIArgInfo::getDirect();
4217 }
4218
4219 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
4220 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4221 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4222 if (Width > 64 || !llvm::isPowerOf2_64(Width))
4223 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4224
4225 // Otherwise, coerce it to a small integer.
4226 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
4227 }
4228
4229 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4230 switch (BT->getKind()) {
4231 case BuiltinType::Bool:
4232 // Bool type is always extended to the ABI, other builtin types are not
4233 // extended.
4234 return ABIArgInfo::getExtend(Ty);
4235
4236 case BuiltinType::LongDouble:
4237 // Mingw64 GCC uses the old 80 bit extended precision floating point
4238 // unit. It passes them indirectly through memory.
4239 if (IsMingw64) {
4240 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4241 if (LDF == &llvm::APFloat::x87DoubleExtended())
4242 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4243 }
4244 break;
4245
4246 case BuiltinType::Int128:
4247 case BuiltinType::UInt128:
4248 // If it's a parameter type, the normal ABI rule is that arguments larger
4249 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
4250 // even though it isn't particularly efficient.
4251 if (!IsReturnType)
4252 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4253
4254 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4255 // Clang matches them for compatibility.
4256 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
4257 llvm::Type::getInt64Ty(getVMContext()), 2));
4258
4259 default:
4260 break;
4261 }
4262 }
4263
4264 if (Ty->isExtIntType()) {
4265 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4266 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4267 // However, non-power-of-two _ExtInts will be passed as 1, 2, 4, or 8 bytes
4268 // anyway as long as it fits in them, so we don't have to check the power
4269 // of 2.
4270 if (Width <= 64)
4271 return ABIArgInfo::getDirect();
4272 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4273 }
4274
4275 return ABIArgInfo::getDirect();
4276}
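// Illustrative sketch (not part of TargetInfo.cpp): how the MS x64 size rule
// applied above plays out for some hypothetical plain-old-data types.
struct Fits8 { int a, b; };        // 8 bytes  -> coerced to i64, passed directly
struct Fits4 { short a; char b; }; // 4 bytes  -> coerced to i32, passed directly
struct Odd12 { int a, b, c; };     // 12 bytes -> not 1/2/4/8, passed by reference
struct Odd3  { char a, b, c; };    // 3 bytes  -> not a power of 2, by reference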
4277
4278void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
4279 unsigned FreeSSERegs,
4280 bool IsVectorCall,
4281 bool IsRegCall) const {
4282 unsigned Count = 0;
4283 for (auto &I : FI.arguments()) {
4284 // Vectorcall in x64 only permits the first 6 arguments to be passed
4285 // as XMM/YMM registers.
4286 if (Count < VectorcallMaxParamNumAsReg)
4287 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4288 else {
4289 // Since these cannot be passed in registers, pretend no registers
4290 // are left.
4291 unsigned ZeroSSERegsAvail = 0;
4292 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
4293 IsVectorCall, IsRegCall);
4294 }
4295 ++Count;
4296 }
4297
4298 for (auto &I : FI.arguments()) {
4299 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4300 }
4301}
4302
4303void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4304 const unsigned CC = FI.getCallingConvention();
4305 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4306 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4307
4308 // If __attribute__((sysv_abi)) is in use, use the SysV argument
4309 // classification rules.
4310 if (CC == llvm::CallingConv::X86_64_SysV) {
4311 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4312 SysVABIInfo.computeInfo(FI);
4313 return;
4314 }
4315
4316 unsigned FreeSSERegs = 0;
4317 if (IsVectorCall) {
4318 // We can use up to 4 SSE return registers with vectorcall.
4319 FreeSSERegs = 4;
4320 } else if (IsRegCall) {
4321 // RegCall gives us 16 SSE registers.
4322 FreeSSERegs = 16;
4323 }
4324
4325 if (!getCXXABI().classifyReturnType(FI))
4326 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4327 IsVectorCall, IsRegCall);
4328
4329 if (IsVectorCall) {
4330 // We can use up to 6 SSE register parameters with vectorcall.
4331 FreeSSERegs = 6;
4332 } else if (IsRegCall) {
4333 // RegCall gives us 16 SSE registers; we can reuse the return registers.
4334 FreeSSERegs = 16;
4335 }
4336
4337 if (IsVectorCall) {
4338 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4339 } else {
4340 for (auto &I : FI.arguments())
4341 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4342 }
4343
4344}
4345
4346Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4347 QualType Ty) const {
4348
4349 bool IsIndirect = false;
4350
4351 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4352 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4353 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4354 uint64_t Width = getContext().getTypeSize(Ty);
4355 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4356 }
4357
4358 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4359 CGF.getContext().getTypeInfoInChars(Ty),
4360 CharUnits::fromQuantity(8),
4361 /*allowHigherAlign*/ false);
4362}
4363
4364static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4365 llvm::Value *Address, bool Is64Bit,
4366 bool IsAIX) {
4367 // This is calculated from the LLVM and GCC tables and verified
4368 // against gcc output. AFAIK all PPC ABIs use the same encoding.
4369
4370 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4371
4372 llvm::IntegerType *i8 = CGF.Int8Ty;
4373 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4374 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4375 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4376
4377 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
4378 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
4379
4380 // 32-63: fp0-31, the 8-byte floating-point registers
4381 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4382
4383 // 64-67 are various 4-byte or 8-byte special-purpose registers:
4384 // 64: mq
4385 // 65: lr
4386 // 66: ctr
4387 // 67: ap
4388 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
4389
4390 // 68-76 are various 4-byte special-purpose registers:
4391 // 68-75 cr0-7
4392 // 76: xer
4393 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4394
4395 // 77-108: v0-31, the 16-byte vector registers
4396 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4397
4398 // 109: vrsave
4399 // 110: vscr
4400 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
4401
4402 // AIX does not utilize the rest of the registers.
4403 if (IsAIX)
4404 return false;
4405
4406 // 111: spe_acc
4407 // 112: spefscr
4408 // 113: sfp
4409 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
4410
4411 if (!Is64Bit)
4412 return false;
4413
4414 // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
4415 // or above CPU.
4416 // 64-bit only registers:
4417 // 114: tfhar
4418 // 115: tfiar
4419 // 116: texasr
4420 AssignToArrayRange(Builder, Address, Eight8, 114, 116);
4421
4422 return false;
4423}
4424
4425// AIX
4426namespace {
4427/// AIXABIInfo - The AIX XCOFF ABI information.
4428class AIXABIInfo : public ABIInfo {
4429 const bool Is64Bit;
4430 const unsigned PtrByteSize;
4431 CharUnits getParamTypeAlignment(QualType Ty) const;
4432
4433public:
4434 AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4435 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
4436
4437 bool isPromotableTypeForABI(QualType Ty) const;
4438
4439 ABIArgInfo classifyReturnType(QualType RetTy) const;
4440 ABIArgInfo classifyArgumentType(QualType Ty) const;
4441
4442 void computeInfo(CGFunctionInfo &FI) const override {
4443 if (!getCXXABI().classifyReturnType(FI))
4444 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4445
4446 for (auto &I : FI.arguments())
4447 I.info = classifyArgumentType(I.type);
4448 }
4449
4450 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4451 QualType Ty) const override;
4452};
4453
4454class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
4455 const bool Is64Bit;
4456
4457public:
4458 AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4459 : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
4460 Is64Bit(Is64Bit) {}
4461 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4462 return 1; // r1 is the dedicated stack pointer
4463 }
4464
4465 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4466 llvm::Value *Address) const override;
4467};
4468} // namespace
4469
4470// Return true if the ABI requires Ty to be passed sign- or zero-
4471// extended to 32/64 bits.
4472bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
4473 // Treat an enum type as its underlying type.
4474 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4475 Ty = EnumTy->getDecl()->getIntegerType();
4476
4477 // Promotable integer types are required to be promoted by the ABI.
4478 if (Ty->isPromotableIntegerType())
4479 return true;
4480
4481 if (!Is64Bit)
4482 return false;
4483
4484 // In 64-bit mode, in addition to the usual promotable integer types, we
4485 // also need to extend all 32-bit types, since the ABI requires promotion
4486 // to 64 bits.
4487 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4488 switch (BT->getKind()) {
4489 case BuiltinType::Int:
4490 case BuiltinType::UInt:
4491 return true;
4492 default:
4493 break;
4494 }
4495
4496 return false;
4497}
4498
4499ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
4500 if (RetTy->isAnyComplexType())
4501 llvm::report_fatal_error("complex type is not supported on AIX yet");
4502
4503 if (RetTy->isVectorType())
4504 llvm::report_fatal_error("vector type is not supported on AIX yet");
4505
4506 if (RetTy->isVoidType())
4507 return ABIArgInfo::getIgnore();
4508
4509 // TODO: Evaluate if AIX power alignment rule would have an impact on the
4510 // alignment here.
4511 if (isAggregateTypeForABI(RetTy))
4512 return getNaturalAlignIndirect(RetTy);
4513
4514 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4515 : ABIArgInfo::getDirect());
4516}
4517
4518ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
4519 Ty = useFirstFieldIfTransparentUnion(Ty);
4520
4521 if (Ty->isAnyComplexType())
4522 llvm::report_fatal_error("complex type is not supported on AIX yet");
4523
4524 if (Ty->isVectorType())
4525 llvm::report_fatal_error("vector type is not supported on AIX yet");
4526
4527 // TODO: Evaluate if AIX power alignment rule would have an impact on the
4528 // alignment here.
4529 if (isAggregateTypeForABI(Ty)) {
4530 // Records with non-trivial destructors/copy-constructors should not be
4531 // passed by value.
4532 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4533 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4534
4535 CharUnits CCAlign = getParamTypeAlignment(Ty);
4536 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
4537
4538 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
4539 /*Realign*/ TyAlign > CCAlign);
4540 }
4541
4542 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4543 : ABIArgInfo::getDirect());
4544}
4545
4546CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
4547 if (Ty->isAnyComplexType())
4548 llvm::report_fatal_error("complex type is not supported on AIX yet");
4549
4550 if (Ty->isVectorType())
4551 llvm::report_fatal_error("vector type is not supported on AIX yet");
4552
4553 // If the structure contains a vector type, the alignment is 16.
4554 if (isRecordWithSIMDVectorType(getContext(), Ty))
4555 return CharUnits::fromQuantity(16);
4556
4557 return CharUnits::fromQuantity(PtrByteSize);
4558}
4559
4560Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4561 QualType Ty) const {
4562 if (Ty->isAnyComplexType())
4563 llvm::report_fatal_error("complex type is not supported on AIX yet");
4564
4565 if (Ty->isVectorType())
4566 llvm::report_fatal_error("vector type is not supported on AIX yet");
4567
4568 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4569 TypeInfo.second = getParamTypeAlignment(Ty);
4570
4571 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
4572
4573 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
4574 SlotSize, /*AllowHigher*/ true);
4575}
4576
4577bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4578 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
4579 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
4580}
4581
4582// PowerPC-32
4583namespace {
4584/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4585class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4586 bool IsSoftFloatABI;
4587 bool IsRetSmallStructInRegABI;
4588
4589 CharUnits getParamTypeAlignment(QualType Ty) const;
4590
4591public:
4592 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
4593 bool RetSmallStructInRegABI)
4594 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4595 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4596
4597 ABIArgInfo classifyReturnType(QualType RetTy) const;
4598
4599 void computeInfo(CGFunctionInfo &FI) const override {
4600 if (!getCXXABI().classifyReturnType(FI))
4601 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4602 for (auto &I : FI.arguments())
4603 I.info = classifyArgumentType(I.type);
4604 }
4605
4606 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4607 QualType Ty) const override;
4608};
4609
4610class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4611public:
4612 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4613 bool RetSmallStructInRegABI)
4614 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
4615 CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4616
4617 static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4618 const CodeGenOptions &Opts);
4619
4620 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4621 // This is recovered from gcc output.
4622 return 1; // r1 is the dedicated stack pointer
4623 }
4624
4625 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4626 llvm::Value *Address) const override;
4627};
4628}
4629
4630CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4631 // Complex types are passed just like their elements.
4632 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4633 Ty = CTy->getElementType();
4634
4635 if (Ty->isVectorType())
4636 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4637 : 4);
4638
4639 // For single-element float/vector structs, we consider the whole type
4640 // to have the same alignment requirements as its single element.
4641 const Type *AlignTy = nullptr;
4642 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4643 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4644 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4645 (BT && BT->isFloatingPoint()))
4646 AlignTy = EltType;
4647 }
4648
4649 if (AlignTy)
4650 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4651 return CharUnits::fromQuantity(4);
4652}
4653
4654ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4655 uint64_t Size;
4656
4657 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
4658 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
4659 (Size = getContext().getTypeSize(RetTy)) <= 64) {
4660 // System V ABI (1995), page 3-22, specified:
4661 // > A structure or union whose size is less than or equal to 8 bytes
4662 // > shall be returned in r3 and r4, as if it were first stored in the
4663 // > 8-byte aligned memory area and then the low addressed word were
4664 // > loaded into r3 and the high-addressed word into r4. Bits beyond
4665 // > the last member of the structure or union are not defined.
4666 //
4667 // GCC for big-endian PPC32 inserts the pad before the first member,
4668 // not "beyond the last member" of the struct. To stay compatible
4669 // with GCC, we coerce the struct to an integer of the same size.
4670 // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
4671 if (Size == 0)
4672 return ABIArgInfo::getIgnore();
4673 else {
4674 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
4675 return ABIArgInfo::getDirect(CoerceTy);
4676 }
4677 }
4678
4679 return DefaultABIInfo::classifyReturnType(RetTy);
4680}
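// Illustrative sketch (not part of TargetInfo.cpp): what the
// -msvr4-struct-return coercion above would mean for some hypothetical types.
struct OneWord  { int a; };       // 32 bits -> coerced to i32, returned in r3
struct TwoWords { int a, b; };    // 64 bits -> coerced to i64, returned in r3:r4
struct Big      { int a, b, c; }; // 96 bits -> falls through to the default
                                  //            (indirect) return path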
4681
4682// TODO: this implementation is now likely redundant with
4683// DefaultABIInfo::EmitVAArg.
4684Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4685 QualType Ty) const {
4686 if (getTarget().getTriple().isOSDarwin()) {
4687 auto TI = getContext().getTypeInfoInChars(Ty);
4688 TI.second = getParamTypeAlignment(Ty);
4689
4690 CharUnits SlotSize = CharUnits::fromQuantity(4);
4691 return emitVoidPtrVAArg(CGF, VAList, Ty,
4692 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4693 /*AllowHigherAlign=*/true);
4694 }
4695
4696 const unsigned OverflowLimit = 8;
4697 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4698 // TODO: Implement this. For now ignore.
4699 (void)CTy;
4700 return Address::invalid(); // FIXME?
4701 }
4702
4703 // struct __va_list_tag {
4704 // unsigned char gpr;
4705 // unsigned char fpr;
4706 // unsigned short reserved;
4707 // void *overflow_arg_area;
4708 // void *reg_save_area;
4709 // };
4710
4711 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4712 bool isInt =
4713 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4714 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4715
4716 // All aggregates are passed indirectly? That doesn't seem consistent
4717 // with the argument-lowering code.
4718 bool isIndirect = Ty->isAggregateType();
4719
4720 CGBuilderTy &Builder = CGF.Builder;
4721
4722 // The calling convention either uses 1-2 GPRs or 1 FPR.
4723 Address NumRegsAddr = Address::invalid();
4724 if (isInt || IsSoftFloatABI) {
4725 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4726 } else {
4727 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4728 }
4729
4730 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4731
4732 // "Align" the register count when TY is i64.
4733 if (isI64 || (isF64 && IsSoftFloatABI)) {
4734 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4735 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4736 }
4737
4738 llvm::Value *CC =
4739 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4740
4741 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4742 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4743 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4744
4745 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4746
4747 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4748 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4749
4750 // Case 1: consume registers.
4751 Address RegAddr = Address::invalid();
4752 {
4753 CGF.EmitBlock(UsingRegs);
4754
4755 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4756 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4757 CharUnits::fromQuantity(8));
4758 assert(RegAddr.getElementType() == CGF.Int8Ty);
4759
4760 // Floating-point registers start after the general-purpose registers.
4761 if (!(isInt || IsSoftFloatABI)) {
4762 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4763 CharUnits::fromQuantity(32));
4764 }
4765
4766 // Get the address of the saved value by scaling the number of
4767 // registers we've used by the size of each register.
4768 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4769 llvm::Value *RegOffset =
4770 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4771 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4772 RegAddr.getPointer(), RegOffset),
4773 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4774 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4775
4776 // Increase the used-register count.
4777 NumRegs =
4778 Builder.CreateAdd(NumRegs,
4779 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4780 Builder.CreateStore(NumRegs, NumRegsAddr);
4781
4782 CGF.EmitBranch(Cont);
4783 }
4784
4785 // Case 2: consume space in the overflow area.
4786 Address MemAddr = Address::invalid();
4787 {
4788 CGF.EmitBlock(UsingOverflow);
4789
4790 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4791
4792 // Everything in the overflow area is rounded up to a size of at least 4.
4793 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4794
4795 CharUnits Size;
4796 if (!isIndirect) {
4797 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4798 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4799 } else {
4800 Size = CGF.getPointerSize();
4801 }
4802
4803 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4804 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4805 OverflowAreaAlign);
4806 // Round up address of argument to alignment
4807 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4808 if (Align > OverflowAreaAlign) {
4809 llvm::Value *Ptr = OverflowArea.getPointer();
4810 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4811 Align);
4812 }
4813
4814 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4815
4816 // Increase the overflow area.
4817 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4818 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4819 CGF.EmitBranch(Cont);
4820 }
4821
4822 CGF.EmitBlock(Cont);
4823
4824 // Merge the cases with a phi.
4825 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4826 "vaarg.addr");
4827
4828 // Load the pointer if the argument was passed indirectly.
4829 if (isIndirect) {
4830 Result = Address(Builder.CreateLoad(Result, "aggr"),
4831 getContext().getTypeAlignInChars(Ty));
4832 }
4833
4834 return Result;
4835}
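// Illustrative sketch (not part of TargetInfo.cpp): a caller of the lowering
// above.  Under the hard-float SVR4 ABI, each va_arg of a double bumps the fpr
// counter until OverflowLimit (8) is reached, after which the using_overflow
// block reads from the overflow area.  Names are hypothetical.
#include <stdarg.h>
static double first_double(int count, ...) {
  va_list ap;
  va_start(ap, count);
  double d = va_arg(ap, double); // using_regs while fpr < 8, else using_overflow
  va_end(ap);
  return d;
}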
4836
4837bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4838 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4839 assert(Triple.getArch() == llvm::Triple::ppc);
4840
4841 switch (Opts.getStructReturnConvention()) {
4842 case CodeGenOptions::SRCK_Default:
4843 break;
4844 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
4845 return false;
4846 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
4847 return true;
4848 }
4849
4850 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
4851 return true;
4852
4853 return false;
4854}
4855
4856bool
4857PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4858 llvm::Value *Address) const {
4859 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
4860 /*IsAIX*/ false);
4861}
4862
4863// PowerPC-64
4864
4865namespace {
4866/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4867class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4868public:
4869 enum ABIKind {
4870 ELFv1 = 0,
4871 ELFv2
4872 };
4873
4874private:
4875 static const unsigned GPRBits = 64;
4876 ABIKind Kind;
4877 bool HasQPX;
4878 bool IsSoftFloatABI;
4879
4880 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4881 // will be passed in a QPX register.
4882 bool IsQPXVectorTy(const Type *Ty) const {
4883 if (!HasQPX)
4884 return false;
4885
4886 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4887 unsigned NumElements = VT->getNumElements();
4888 if (NumElements == 1)
4889 return false;
4890
4891 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4892 if (getContext().getTypeSize(Ty) <= 256)
4893 return true;
4894 } else if (VT->getElementType()->
4895 isSpecificBuiltinType(BuiltinType::Float)) {
4896 if (getContext().getTypeSize(Ty) <= 128)
4897 return true;
4898 }
4899 }
4900
4901 return false;
4902 }
4903
4904 bool IsQPXVectorTy(QualType Ty) const {
4905 return IsQPXVectorTy(Ty.getTypePtr());
4906 }
4907
4908public:
4909 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4910 bool SoftFloatABI)
4911 : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4912 IsSoftFloatABI(SoftFloatABI) {}
4913
4914 bool isPromotableTypeForABI(QualType Ty) const;
4915 CharUnits getParamTypeAlignment(QualType Ty) const;
4916
4917 ABIArgInfo classifyReturnType(QualType RetTy) const;
4918 ABIArgInfo classifyArgumentType(QualType Ty) const;
4919
4920 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4921 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4922 uint64_t Members) const override;
4923
4924 // TODO: We can add more logic to computeInfo to improve performance.
4925 // Example: For aggregate arguments that fit in a register, we could
4926 // use getDirectInReg (as is done below for structs containing a single
4927 // floating-point value) to avoid pushing them to memory on function
4928 // entry. This would require changing the logic in PPCISelLowering
4929 // when lowering the parameters in the caller and args in the callee.
4930 void computeInfo(CGFunctionInfo &FI) const override {
4931 if (!getCXXABI().classifyReturnType(FI))
4932 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4933 for (auto &I : FI.arguments()) {
4934 // We rely on the default argument classification for the most part.
4935 // One exception: An aggregate containing a single floating-point
4936 // or vector item must be passed in a register if one is available.
4937 const Type *T = isSingleElementStruct(I.type, getContext());
4938 if (T) {
4939 const BuiltinType *BT = T->getAs<BuiltinType>();
4940 if (IsQPXVectorTy(T) ||
4941 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4942 (BT && BT->isFloatingPoint())) {
4943 QualType QT(T, 0);
4944 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4945 continue;
4946 }
4947 }
4948 I.info = classifyArgumentType(I.type);
4949 }
4950 }
4951
4952 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4953 QualType Ty) const override;
4954
4955 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4956 bool asReturnValue) const override {
4957 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4958 }
4959
4960 bool isSwiftErrorInRegister() const override {
4961 return false;
4962 }
4963};
4964
4965class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4966
4967public:
4968 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4969 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4970 bool SoftFloatABI)
4971 : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
4972 CGT, Kind, HasQPX, SoftFloatABI)) {}
4973
4974 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4975 // This is recovered from gcc output.
4976 return 1; // r1 is the dedicated stack pointer
4977 }
4978
4979 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4980 llvm::Value *Address) const override;
4981};
4982
4983class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4984public:
4985 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4986
4987 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4988 // This is recovered from gcc output.
4989 return 1; // r1 is the dedicated stack pointer
4990 }
4991
4992 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4993 llvm::Value *Address) const override;
4994};
4995
4996}
4997
4998// Return true if the ABI requires Ty to be passed sign- or zero-
4999// extended to 64 bits.
5000bool
5001PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
5002 // Treat an enum type as its underlying type.
5003 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5004 Ty = EnumTy->getDecl()->getIntegerType();
5005
5006 // Promotable integer types are required to be promoted by the ABI.
5007 if (isPromotableIntegerTypeForABI(Ty))
5008 return true;
5009
5010 // In addition to the usual promotable integer types, we also need to
5011 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
5012 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5013 switch (BT->getKind()) {
5014 case BuiltinType::Int:
5015 case BuiltinType::UInt:
5016 return true;
5017 default:
5018 break;
5019 }
5020
5021 if (const auto *EIT = Ty->getAs<ExtIntType>())
5022 if (EIT->getNumBits() < 64)
5023 return true;
5024
5025 return false;
5026}
5027
5028/// isAlignedParamType - Determine whether a type requires 16-byte or
5029/// higher alignment in the parameter area. Always returns at least 8.
5030CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
5031 // Complex types are passed just like their elements.
5032 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
5033 Ty = CTy->getElementType();
5034
5035 // Only vector types of size 16 bytes need alignment (larger types are
5036 // passed via reference, smaller types are not aligned).
5037 if (IsQPXVectorTy(Ty)) {
5038 if (getContext().getTypeSize(Ty) > 128)
5039 return CharUnits::fromQuantity(32);
5040
5041 return CharUnits::fromQuantity(16);
5042 } else if (Ty->isVectorType()) {
5043 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
5044 }
5045
5046 // For single-element float/vector structs, we consider the whole type
5047 // to have the same alignment requirements as its single element.
5048 const Type *AlignAsType = nullptr;
5049 const Type *EltType = isSingleElementStruct(Ty, getContext());
5050 if (EltType) {
5051 const BuiltinType *BT = EltType->getAs<BuiltinType>();
5052 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
5053 getContext().getTypeSize(EltType) == 128) ||
5054 (BT && BT->isFloatingPoint()))
5055 AlignAsType = EltType;
5056 }
5057
5058 // Likewise for ELFv2 homogeneous aggregates.
5059 const Type *Base = nullptr;
5060 uint64_t Members = 0;
5061 if (!AlignAsType && Kind == ELFv2 &&
5062 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
5063 AlignAsType = Base;
5064
5065 // With special case aggregates, only vector base types need alignment.
5066 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
5067 if (getContext().getTypeSize(AlignAsType) > 128)
5068 return CharUnits::fromQuantity(32);
5069
5070 return CharUnits::fromQuantity(16);
5071 } else if (AlignAsType) {
5072 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
5073 }
5074
5075 // Otherwise, we only need alignment for any aggregate type that
5076 // has an alignment requirement of >= 16 bytes.
5077 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
5078 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
5079 return CharUnits::fromQuantity(32);
5080 return CharUnits::fromQuantity(16);
5081 }
5082
5083 return CharUnits::fromQuantity(8);
5084}
5085
5086/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
5087/// aggregate. Base is set to the base element type, and Members is set
5088/// to the number of base elements.
5089bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
5090 uint64_t &Members) const {
5091 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
5092 uint64_t NElements = AT->getSize().getZExtValue();
5093 if (NElements == 0)
5094 return false;
5095 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
5096 return false;
5097 Members *= NElements;
5098 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
5099 const RecordDecl *RD = RT->getDecl();
5100 if (RD->hasFlexibleArrayMember())
5101 return false;
5102
5103 Members = 0;
5104
5105 // If this is a C++ record, check the bases first.
5106 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
5107 for (const auto &I : CXXRD->bases()) {
5108 // Ignore empty records.
5109 if (isEmptyRecord(getContext(), I.getType(), true))
5110 continue;
5111
5112 uint64_t FldMembers;
5113 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
5114 return false;
5115
5116 Members += FldMembers;
5117 }
5118 }
5119
5120 for (const auto *FD : RD->fields()) {
5121 // Ignore (non-zero arrays of) empty records.
5122 QualType FT = FD->getType();
5123 while (const ConstantArrayType *AT =
5124 getContext().getAsConstantArrayType(FT)) {
5125 if (AT->getSize().getZExtValue() == 0)
5126 return false;
5127 FT = AT->getElementType();
5128 }
5129 if (isEmptyRecord(getContext(), FT, true))
5130 continue;
5131
5132 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5133 if (getContext().getLangOpts().CPlusPlus &&
5134 FD->isZeroLengthBitField(getContext()))
5135 continue;
5136
5137 uint64_t FldMembers;
5138 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
5139 return false;
5140
5141 Members = (RD->isUnion() ?
5142 std::max(Members, FldMembers) : Members + FldMembers);
5143 }
5144
5145 if (!Base)
5146 return false;
5147
5148 // Ensure there is no padding.
5149 if (getContext().getTypeSize(Base) * Members !=
5150 getContext().getTypeSize(Ty))
5151 return false;
5152 } else {
5153 Members = 1;
5154 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
5155 Members = 2;
5156 Ty = CT->getElementType();
5157 }
5158
5159 // Most ABIs only support float, double, and some vector type widths.
5160 if (!isHomogeneousAggregateBaseType(Ty))
5161 return false;
5162
5163 // The base type must be the same for all members. Types that
5164 // agree in both total size and mode (float vs. vector) are
5165 // treated as being equivalent here.
5166 const Type *TyPtr = Ty.getTypePtr();
5167 if (!Base) {
5168 Base = TyPtr;
5169 // If it's a non-power-of-2 vector, its total size has already been
5170 // rounded up to a power of 2, so widen the vector type to match it.
5171 if (const VectorType *VT = Base->getAs<VectorType>()) {
5172 QualType EltTy = VT->getElementType();
5173 unsigned NumElements =
5174 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
5175 Base = getContext()
5176 .getVectorType(EltTy, NumElements, VT->getVectorKind())
5177 .getTypePtr();
5178 }
5179 }
5180
5181 if (Base->isVectorType() != TyPtr->isVectorType() ||
5182 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
5183 return false;
5184 }
5185 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
5186}
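// Illustrative sketch (not part of TargetInfo.cpp): aggregates the recursive
// walk above would or would not accept, assuming an ELFv2-style base-type
// predicate such as the one defined below.  Types are hypothetical.
struct Hfa3  { double x, y, z; };    // accepted: Base = double, Members = 3
struct Hfa4  { float v[4]; };        // accepted: Base = float,  Members = 4
struct Mixed { float f; int i; };    // rejected: int is not a valid base type
struct Skew  { float f; double d; }; // rejected: members disagree in size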
5187
5188bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5189 // Homogeneous aggregates for ELFv2 must have base types of float,
5190 // double, long double, or 128-bit vectors.
5191 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5192 if (BT->getKind() == BuiltinType::Float ||
5193 BT->getKind() == BuiltinType::Double ||
5194 BT->getKind() == BuiltinType::LongDouble ||
5195 (getContext().getTargetInfo().hasFloat128Type() &&
5196 (BT->getKind() == BuiltinType::Float128))) {
5197 if (IsSoftFloatABI)
5198 return false;
5199 return true;
5200 }
5201 }
5202 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5203 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
5204 return true;
5205 }
5206 return false;
5207}
5208
5209bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
5210 const Type *Base, uint64_t Members) const {
5211 // Vector and fp128 types require one register, other floating point types
5212 // require one or two registers depending on their size.
5213 uint32_t NumRegs =
5214 ((getContext().getTargetInfo().hasFloat128Type() &&
5215 Base->isFloat128Type()) ||
5216 Base->isVectorType()) ? 1
5217 : (getContext().getTypeSize(Base) + 63) / 64;
5218
5219 // Homogeneous Aggregates may occupy at most 8 registers.
5220 return Members * NumRegs <= 8;
5221}
5222
5223ABIArgInfo
5224PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
5225 Ty = useFirstFieldIfTransparentUnion(Ty);
5226
5227 if (Ty->isAnyComplexType())
5228 return ABIArgInfo::getDirect();
5229
5230 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
5231 // or via reference (larger than 16 bytes).
5232 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
5233 uint64_t Size = getContext().getTypeSize(Ty);
5234 if (Size > 128)
5235 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5236 else if (Size < 128) {
5237 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5238 return ABIArgInfo::getDirect(CoerceTy);
5239 }
5240 }
5241
5242 if (const auto *EIT = Ty->getAs<ExtIntType>())
5243 if (EIT->getNumBits() > 128)
5244 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
5245
5246 if (isAggregateTypeForABI(Ty)) {
5247 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5248 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5249
5250 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
5251 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5252
5253 // ELFv2 homogeneous aggregates are passed as array types.
5254 const Type *Base = nullptr;
5255 uint64_t Members = 0;
5256 if (Kind == ELFv2 &&
5257 isHomogeneousAggregate(Ty, Base, Members)) {
5258 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5259 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5260 return ABIArgInfo::getDirect(CoerceTy);
5261 }
5262
5263 // If an aggregate may end up fully in registers, we do not
5264 // use the ByVal method, but pass the aggregate as array.
5265 // This is usually beneficial since we avoid forcing the
5266 // back-end to store the argument to memory.
5267 uint64_t Bits = getContext().getTypeSize(Ty);
5268 if (Bits > 0 && Bits <= 8 * GPRBits) {
5269 llvm::Type *CoerceTy;
5270
5271 // Types up to 8 bytes are passed as integer type (which will be
5272 // properly aligned in the argument save area doubleword).
5273 if (Bits <= GPRBits)
5274 CoerceTy =
5275 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5276 // Larger types are passed as arrays, with the base type selected
5277 // according to the required alignment in the save area.
5278 else {
5279 uint64_t RegBits = ABIAlign * 8;
5280 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
5281 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
5282 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
5283 }
5284
5285 return ABIArgInfo::getDirect(CoerceTy);
5286 }
5287
5288 // All other aggregates are passed ByVal.
5289 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5290 /*ByVal=*/true,
5291 /*Realign=*/TyAlign > ABIAlign);
5292 }
5293
5294 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
5295 : ABIArgInfo::getDirect());
5296}
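// Illustrative sketch (not part of TargetInfo.cpp): coercions the ELFv2
// argument classification above would typically produce for hypothetical types.
struct SmallMix   { int a; char b; }; // 8 bytes               -> i64
struct ThreeLongs { long a, b, c; };  // 24 bytes, align 8     -> [3 x i64]
struct Hfa2       { double x, y; };   // homogeneous aggregate -> [2 x double]
// Aggregates larger than 8 doublewords (64 bytes) are instead passed ByVal.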
5297
5298ABIArgInfo
5299PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
5300 if (RetTy->isVoidType())
5301 return ABIArgInfo::getIgnore();
5302
5303 if (RetTy->isAnyComplexType())
5304 return ABIArgInfo::getDirect();
5305
5306 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
5307 // or via reference (larger than 16 bytes).
5308 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
5309 uint64_t Size = getContext().getTypeSize(RetTy);
5310 if (Size > 128)
5311 return getNaturalAlignIndirect(RetTy);
5312 else if (Size < 128) {
5313 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5314 return ABIArgInfo::getDirect(CoerceTy);
5315 }
5316 }
5317
5318 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5319 if (EIT->getNumBits() > 128)
5320 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
5321
5322 if (isAggregateTypeForABI(RetTy)) {
5323 // ELFv2 homogeneous aggregates are returned as array types.
5324 const Type *Base = nullptr;
5325 uint64_t Members = 0;
5326 if (Kind == ELFv2 &&
5327 isHomogeneousAggregate(RetTy, Base, Members)) {
5328 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5329 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5330 return ABIArgInfo::getDirect(CoerceTy);
5331 }
5332
5333 // ELFv2 small aggregates are returned in up to two registers.
5334 uint64_t Bits = getContext().getTypeSize(RetTy);
5335 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
5336 if (Bits == 0)
5337 return ABIArgInfo::getIgnore();
5338
5339 llvm::Type *CoerceTy;
5340 if (Bits > GPRBits) {
5341 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
5342 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
5343 } else
5344 CoerceTy =
5345 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5346 return ABIArgInfo::getDirect(CoerceTy);
5347 }
5348
5349 // All other aggregates are returned indirectly.
5350 return getNaturalAlignIndirect(RetTy);
5351 }
5352
5353 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
5354 : ABIArgInfo::getDirect());
5355}
5356
5357// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
5358Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5359 QualType Ty) const {
5360 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
5361 TypeInfo.second = getParamTypeAlignment(Ty);
5362
5363 CharUnits SlotSize = CharUnits::fromQuantity(8);
5364
5365 // If we have a complex type and the base type is smaller than 8 bytes,
5366 // the ABI calls for the real and imaginary parts to be right-adjusted
5367 // in separate doublewords. However, Clang expects us to produce a
5368 // pointer to a structure with the two parts packed tightly. So generate
5369 // loads of the real and imaginary parts relative to the va_list pointer,
5370 // and store them to a temporary structure.
5371 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
5372 CharUnits EltSize = TypeInfo.first / 2;
5373 if (EltSize < SlotSize) {
5374 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
5375 SlotSize * 2, SlotSize,
5376 SlotSize, /*AllowHigher*/ true);
5377
5378 Address RealAddr = Addr;
5379 Address ImagAddr = RealAddr;
5380 if (CGF.CGM.getDataLayout().isBigEndian()) {
5381 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
5382 SlotSize - EltSize);
5383 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
5384 2 * SlotSize - EltSize);
5385 } else {
5386 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
5387 }
5388
5389 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
5390 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
5391 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
5392 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
5393 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
5394
5395 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
5396 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
5397 /*init*/ true);
5398 return Temp;
5399 }
5400 }
5401
5402 // Otherwise, just use the general rule.
5403 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
5404 TypeInfo, SlotSize, /*AllowHigher*/ true);
5405}
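// Editor's note: a minimal standalone sketch of the offset arithmetic used in
// EmitVAArg above for a _Complex float (EltSize = 4, SlotSize = 8). It only
// reproduces the byte offsets the real/imaginary loads use; it does not call
// any CodeGen APIs, and iterating over both endiannesses is purely for the
// example.
#include <cstdio>

int main() {
  const long SlotSize = 8; // each part occupies one doubleword
  const long EltSize = 4;  // sizeof(float)
  for (int BigEndian = 0; BigEndian <= 1; ++BigEndian) {
    long RealOff = BigEndian ? SlotSize - EltSize : 0;
    long ImagOff = BigEndian ? 2 * SlotSize - EltSize : SlotSize;
    std::printf("%s-endian: real at +%ld, imag at +%ld\n",
                BigEndian ? "big" : "little", RealOff, ImagOff);
  }
  // big-endian: real at +4, imag at +12; little-endian: real at +0, imag at +8
}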
5406
5407bool
5408PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
5409 CodeGen::CodeGenFunction &CGF,
5410 llvm::Value *Address) const {
5411 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5412 /*IsAIX*/ false);
5413}
5414
5415bool
5416PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5417 llvm::Value *Address) const {
5418 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5419 /*IsAIX*/ false);
5420}
5421
5422//===----------------------------------------------------------------------===//
5423// AArch64 ABI Implementation
5424//===----------------------------------------------------------------------===//
5425
5426namespace {
5427
5428class AArch64ABIInfo : public SwiftABIInfo {
5429public:
5430 enum ABIKind {
5431 AAPCS = 0,
5432 DarwinPCS,
5433 Win64
5434 };
5435
5436private:
5437 ABIKind Kind;
5438
5439public:
5440 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
5441 : SwiftABIInfo(CGT), Kind(Kind) {}
5442
5443private:
5444 ABIKind getABIKind() const { return Kind; }
5445 bool isDarwinPCS() const { return Kind == DarwinPCS; }
5446
5447 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
5448 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5449 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5450 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5451 uint64_t Members) const override;
5452
5453 bool isIllegalVectorType(QualType Ty) const;
5454
5455 void computeInfo(CGFunctionInfo &FI) const override {
5456 if (!::classifyReturnType(getCXXABI(), FI, *this))
5457 FI.getReturnInfo() =
5458 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5459
5460 for (auto &it : FI.arguments())
5461 it.info = classifyArgumentType(it.type);
5462 }
5463
5464 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5465 CodeGenFunction &CGF) const;
5466
5467 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
5468 CodeGenFunction &CGF) const;
5469
5470 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5471 QualType Ty) const override {
5472 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
5473 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
5474 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
5475 }
5476
5477 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5478 QualType Ty) const override;
5479
5480 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5481 bool asReturnValue) const override {
5482 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5483 }
5484 bool isSwiftErrorInRegister() const override {
5485 return true;
5486 }
5487
5488 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5489 unsigned elts) const override;
5490
5491 bool allowBFloatArgsAndRet() const override {
5492 return getTarget().hasBFloat16Type();
5493 }
5494};
5495
5496class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
5497public:
5498 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5499 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
5500
5501 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5502 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5503 }
5504
5505 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5506 return 31;
5507 }
5508
5509 bool doesReturnSlotInterfereWithArgs() const override { return false; }
5510
5511 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5512 CodeGen::CodeGenModule &CGM) const override {
5513 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5514 if (!FD)
5515 return;
5516
5517 LangOptions::SignReturnAddressScopeKind Scope =
5518 CGM.getLangOpts().getSignReturnAddressScope();
5519 LangOptions::SignReturnAddressKeyKind Key =
5520 CGM.getLangOpts().getSignReturnAddressKey();
5521 bool BranchTargetEnforcement = CGM.getLangOpts().BranchTargetEnforcement;
5522 if (const auto *TA = FD->getAttr<TargetAttr>()) {
5523 ParsedTargetAttr Attr = TA->parse();
5524 if (!Attr.BranchProtection.empty()) {
5525 TargetInfo::BranchProtectionInfo BPI;
5526 StringRef Error;
5527 (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
5528 BPI, Error);
5529 assert(Error.empty());
5530 Scope = BPI.SignReturnAddr;
5531 Key = BPI.SignKey;
5532 BranchTargetEnforcement = BPI.BranchTargetEnforcement;
5533 }
5534 }
5535
5536 auto *Fn = cast<llvm::Function>(GV);
5537 if (Scope != LangOptions::SignReturnAddressScopeKind::None) {
5538 Fn->addFnAttr("sign-return-address",
5539 Scope == LangOptions::SignReturnAddressScopeKind::All
5540 ? "all"
5541 : "non-leaf");
5542
5543 Fn->addFnAttr("sign-return-address-key",
5544 Key == LangOptions::SignReturnAddressKeyKind::AKey
5545 ? "a_key"
5546 : "b_key");
5547 }
5548
5549 if (BranchTargetEnforcement)
5550 Fn->addFnAttr("branch-target-enforcement");
5551 }
5552};
5553
5554class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5555public:
5556 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5557 : AArch64TargetCodeGenInfo(CGT, K) {}
5558
5559 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5560 CodeGen::CodeGenModule &CGM) const override;
5561
5562 void getDependentLibraryOption(llvm::StringRef Lib,
5563 llvm::SmallString<24> &Opt) const override {
5564 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5565 }
5566
5567 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5568 llvm::SmallString<32> &Opt) const override {
5569 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5570 }
5571};
5572
5573void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5574 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5575 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5576 if (GV->isDeclaration())
5577 return;
5578 addStackProbeTargetAttributes(D, GV, CGM);
5579}
5580}
5581
5582ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
5583 Ty = useFirstFieldIfTransparentUnion(Ty);
5584
5585 // Handle illegal vector types here.
5586 if (isIllegalVectorType(Ty)) {
5587 uint64_t Size = getContext().getTypeSize(Ty);
5588 // Android promotes <2 x i8> to i16, not i32
5589 if (isAndroid() && (Size <= 16)) {
5590 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5591 return ABIArgInfo::getDirect(ResType);
5592 }
5593 if (Size <= 32) {
5594 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5595 return ABIArgInfo::getDirect(ResType);
5596 }
5597 if (Size == 64) {
5598 auto *ResType =
5599 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5600 return ABIArgInfo::getDirect(ResType);
5601 }
5602 if (Size == 128) {
5603 auto *ResType =
5604 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5605 return ABIArgInfo::getDirect(ResType);
5606 }
5607 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5608 }
5609
5610 if (!isAggregateTypeForABI(Ty)) {
5611 // Treat an enum type as its underlying type.
5612 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5613 Ty = EnumTy->getDecl()->getIntegerType();
5614
5615 if (const auto *EIT = Ty->getAs<ExtIntType>())
5616 if (EIT->getNumBits() > 128)
5617 return getNaturalAlignIndirect(Ty);
5618
5619 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
5620 ? ABIArgInfo::getExtend(Ty)
5621 : ABIArgInfo::getDirect());
5622 }
5623
5624 // Structures with either a non-trivial destructor or a non-trivial
5625 // copy constructor are always indirect.
5626 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5627 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5628 CGCXXABI::RAA_DirectInMemory);
5629 }
5630
5631 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5632 // elsewhere for GNU compatibility.
5633 uint64_t Size = getContext().getTypeSize(Ty);
5634 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5635 if (IsEmpty || Size == 0) {
5636 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5637 return ABIArgInfo::getIgnore();
5638
5639 // GNU C mode. The only argument that gets ignored is an empty one with size
5640 // 0.
5641 if (IsEmpty && Size == 0)
5642 return ABIArgInfo::getIgnore();
5643 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5644 }
5645
5646 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5647 const Type *Base = nullptr;
5648 uint64_t Members = 0;
5649 if (isHomogeneousAggregate(Ty, Base, Members)) {
5650 return ABIArgInfo::getDirect(
5651 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5652 }
5653
5654 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5655 if (Size <= 128) {
5656 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
5657 // the same size and alignment.
5658 if (getTarget().isRenderScriptTarget()) {
5659 return coerceToIntArray(Ty, getContext(), getVMContext());
5660 }
5661 unsigned Alignment;
5662 if (Kind == AArch64ABIInfo::AAPCS) {
5663 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5664 Alignment = Alignment < 128 ? 64 : 128;
5665 } else {
5666 Alignment = std::max(getContext().getTypeAlign(Ty),
5667 (unsigned)getTarget().getPointerWidth(0));
5668 }
5669 Size = llvm::alignTo(Size, Alignment);
5670
5671 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5672 // For aggregates with 16-byte alignment, we use i128.
5673 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
5674 return ABIArgInfo::getDirect(
5675 Size == Alignment ? BaseTy
5676 : llvm::ArrayType::get(BaseTy, Size / Alignment));
5677 }
5678
5679 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5680}
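// Editor's note: a standalone sketch of the <= 16-byte aggregate coercion in
// the AAPCS branch above. Sizes and alignments are in bits, mirroring
// getTypeSize/getTypeUnadjustedAlign; llvm::alignTo is re-implemented locally
// and the non-RenderScript path is assumed.
#include <cstdio>

static unsigned alignToBits(unsigned Bits, unsigned Align) {
  return (Bits + Align - 1) / Align * Align;
}

int main() {
  // struct { int a, b, c; }: 96 bits, 32-bit unadjusted alignment.
  unsigned Size = 96, Alignment = 32;
  Alignment = Alignment < 128 ? 64 : 128; // AAPCS: only 64 or 128 matter here
  Size = alignToBits(Size, Alignment);    // 96 -> 128
  if (Size == Alignment)
    std::printf("coerce to i%u\n", Alignment);
  else
    std::printf("coerce to [%u x i%u]\n", Size / Alignment, Alignment);
  // Prints "coerce to [2 x i64]".
}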
5681
5682ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
5683 bool IsVariadic) const {
5684 if (RetTy->isVoidType())
5685 return ABIArgInfo::getIgnore();
5686
5687 // Large vector types should be returned via memory.
5688 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
5689 return getNaturalAlignIndirect(RetTy);
5690
5691 if (!isAggregateTypeForABI(RetTy)) {
5692 // Treat an enum type as its underlying type.
5693 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5694 RetTy = EnumTy->getDecl()->getIntegerType();
5695
5696 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5697 if (EIT->getNumBits() > 128)
5698 return getNaturalAlignIndirect(RetTy);
5699
5700 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
5701 ? ABIArgInfo::getExtend(RetTy)
5702 : ABIArgInfo::getDirect());
5703 }
5704
5705 uint64_t Size = getContext().getTypeSize(RetTy);
5706 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
5707 return ABIArgInfo::getIgnore();
5708
5709 const Type *Base = nullptr;
5710 uint64_t Members = 0;
5711 if (isHomogeneousAggregate(RetTy, Base, Members) &&
5712 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
5713 IsVariadic))
5714 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
5715 return ABIArgInfo::getDirect();
5716
5717 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
5718 if (Size <= 128) {
5719 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
5720 // the same size and alignment.
5721 if (getTarget().isRenderScriptTarget()) {
5722 return coerceToIntArray(RetTy, getContext(), getVMContext());
5723 }
5724 unsigned Alignment = getContext().getTypeAlign(RetTy);
5725 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5726
5727 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5728 // For aggregates with 16-byte alignment, we use i128.
5729 if (Alignment < 128 && Size == 128) {
5730 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5731 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5732 }
5733 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5734 }
5735
5736 return getNaturalAlignIndirect(RetTy);
5737}
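// Editor's note: illustrative return coercions implied by the <= 16-byte
// branch above (sizes in bits, "align" is the natural type alignment;
// non-RenderScript, non-HFA cases assumed):
//   struct { int a, b, c; }             ->  96 bits, align  32 -> [2 x i64]
//   struct { double x, y; }             -> 128 bits, align  64 -> [2 x i64]
//   struct alignas(16) { double x, y; } -> 128 bits, align 128 -> i128
//   struct { int a, b; }                ->  64 bits            -> i64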
5738
5739/// isIllegalVectorType - check whether the vector type is legal for AArch64.
5740bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5741 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5742 // Check whether VT is legal.
5743 unsigned NumElements = VT->getNumElements();
5744 uint64_t Size = getContext().getTypeSize(VT);
5745 // NumElements should be power of 2.
5746 if (!llvm::isPowerOf2_32(NumElements))
5747 return true;
5748
5749 // arm64_32 has to be compatible with the ARM logic here, which allows huge
5750 // vectors for some reason.
5751 llvm::Triple Triple = getTarget().getTriple();
5752 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
5753 Triple.isOSBinFormatMachO())
5754 return Size <= 32;
5755
5756 return Size != 64 && (Size != 128 || NumElements == 1);
5757 }
5758 return false;
5759}
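// Editor's note: a standalone restatement of the generic (non-arm64_32) test
// above, with a few sample vector types. Purely illustrative; it takes the
// element count and total size in bits directly instead of a QualType.
#include <cstdio>

static bool isIllegalAArch64Vector(unsigned NumElements, unsigned SizeInBits) {
  // Not a power of two (llvm::isPowerOf2_32 also rejects zero).
  if (NumElements == 0 || (NumElements & (NumElements - 1)) != 0)
    return true;
  return SizeInBits != 64 && (SizeInBits != 128 || NumElements == 1);
}

int main() {
  std::printf("<3 x float> illegal=%d\n", isIllegalAArch64Vector(3, 96));  // 1
  std::printf("<2 x float> illegal=%d\n", isIllegalAArch64Vector(2, 64));  // 0
  std::printf("<4 x i32>   illegal=%d\n", isIllegalAArch64Vector(4, 128)); // 0
  std::printf("<2 x i8>    illegal=%d\n", isIllegalAArch64Vector(2, 16));  // 1
}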
5760
5761bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
5762 llvm::Type *eltTy,
5763 unsigned elts) const {
5764 if (!llvm::isPowerOf2_32(elts))
5765 return false;
5766 if (totalSize.getQuantity() != 8 &&
5767 (totalSize.getQuantity() != 16 || elts == 1))
5768 return false;
5769 return true;
5770}
5771
5772bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5773 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5774 // point type or a short-vector type. This is the same as the 32-bit ABI,
5775 // but with the difference that any floating-point type is allowed,
5776 // including __fp16.
5777 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5778 if (BT->isFloatingPoint())
5779 return true;
5780 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5781 unsigned VecSize = getContext().getTypeSize(VT);
5782 if (VecSize == 64 || VecSize == 128)
5783 return true;
5784 }
5785 return false;
5786}
5787
5788bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5789 uint64_t Members) const {
5790 return Members <= 4;
5791}
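// Editor's note: illustrative examples (not from the source) of what the two
// homogeneous-aggregate predicates above accept under AAPCS64:
struct HFA4 { float a, b, c, d; };         // HFA: <= 4 members, one FP base type
struct NotHFA5 { float a, b, c, d, e; };   // not an HFA: 5 members
struct NotHFAMixed { float a; double b; }; // not an HFA: mixed base types
typedef float V4f __attribute__((vector_size(16)));
struct HVA2 { V4f x, y; };                 // homogeneous aggregate of 128-bit vectors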
5792
5793Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5794 QualType Ty,
5795 CodeGenFunction &CGF) const {
5796 ABIArgInfo AI = classifyArgumentType(Ty);
5797 bool IsIndirect = AI.isIndirect();
5798
5799 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5800 if (IsIndirect)
5801 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5802 else if (AI.getCoerceToType())
5803 BaseTy = AI.getCoerceToType();
5804
5805 unsigned NumRegs = 1;
5806 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5807 BaseTy = ArrTy->getElementType();
5808 NumRegs = ArrTy->getNumElements();
5809 }
5810 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5811
5812 // The AArch64 va_list type and handling is specified in the Procedure Call
5813 // Standard, section B.4:
5814 //
5815 // struct {
5816 // void *__stack;
5817 // void *__gr_top;
5818 // void *__vr_top;
5819 // int __gr_offs;
5820 // int __vr_offs;
5821 // };
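 // (Editor's note, paraphrasing AAPCS64 section B.4: __gr_offs/__vr_offs hold
 // the byte offset from __gr_top/__vr_top of the next saved general/FP
 // register slot. They are negative while register slots remain and reach
 // zero or above once that register class is exhausted, which is why a
 // non-negative loaded offset sends the code below straight to the on-stack
 // path.)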
5822
5823 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5824 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5825 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5826 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5827
5828 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
5829 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
5830
5831 Address reg_offs_p = Address::invalid();
5832 llvm::Value *reg_offs = nullptr;
5833 int reg_top_index;
5834 int RegSize = IsIndirect ? 8 : TySize.getQuantity();
5835 if (!IsFPR) {
5836 // 3 is the field number of __gr_offs
5837 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
5838 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5839 reg_top_index = 1; // field number for __gr_top
5840 RegSize = llvm::alignTo(RegSize, 8);
5841 } else {
5842 // 4 is the field number of __vr_offs.
5843 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
5844 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5845 reg_top_index = 2; // field number for __vr_top
5846 RegSize = 16 * NumRegs;
5847 }
5848
5849 //=======================================
5850 // Find out where argument was passed
5851 //=======================================
5852
5853 // If reg_offs >= 0 we're already using the stack for this type of
5854 // argument. We don't want to keep updating reg_offs (in case it overflows,
5855 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5856 // whatever they get).
5857 llvm::Value *UsingStack = nullptr;
5858 UsingStack = CGF.Builder.CreateICmpSGE(
5859 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5860
5861 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5862
5863 // Otherwise, at least some kind of argument could go in these registers, the
5864 // question is whether this particular type is too big.
5865 CGF.EmitBlock(MaybeRegBlock);
5866
5867 // Integer arguments may need their register alignment corrected (for example
5868 // a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5869 // align __gr_offs to calculate the potential address.
5870 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5871 int Align = TyAlign.getQuantity();
5872
5873 reg_offs = CGF.Builder.CreateAdd(
5874 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5875 "align_regoffs");
5876 reg_offs = CGF.Builder.CreateAnd(
5877 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5878 "aligned_regoffs");
5879 }
5880
5881 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5882 // The fact that this is done unconditionally reflects the fact that
5883 // allocating an argument to the stack also uses up all the remaining
5884 // registers of the appropriate kind.
5885 llvm::Value *NewOffset = nullptr;
5886 NewOffset = CGF.Builder.CreateAdd(
5887 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5888 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5889
5890 // Now we're in a position to decide whether this argument really was in
5891 // registers or not.
5892 llvm::Value *InRegs = nullptr;
5893 InRegs = CGF.Builder.CreateICmpSLE(
5894 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5895
5896 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5897
5898 //=======================================
5899 // Argument was in registers
5900 //=======================================
5901
5902 // Now we emit the code for if the argument was originally passed in
5903 // registers. First start the appropriate block:
5904 CGF.EmitBlock(InRegBlock);
5905
5906 llvm::Value *reg_top = nullptr;
5907 Address reg_top_p =
5908 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
5909 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5910 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5911 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5912 Address RegAddr = Address::invalid();
5913 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
5914
5915 if (IsIndirect) {
5916 // If it's been passed indirectly (actually a struct), whatever we find from
5917 // stored registers or on the stack will actually be a struct **.
5918 MemTy = llvm::PointerType::getUnqual(MemTy);
5919 }
5920
5921 const Type *Base = nullptr;
5922 uint64_t NumMembers = 0;
5923 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
5924 if (IsHFA && NumMembers > 1) {
5925 // Homogeneous aggregates passed in registers will have their elements split
5926 // and stored 16 bytes apart regardless of size (they're notionally in qN,
5927 // qN+1, ...). We reload and store into a temporary local variable
5928 // contiguously.
5929 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
5930 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
5931 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
5932 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5933 Address Tmp = CGF.CreateTempAlloca(HFATy,
5934 std::max(TyAlign, BaseTyInfo.second));
5935
5936 // On big-endian platforms, the value will be right-aligned in its slot.
5937 int Offset = 0;
5938 if (CGF.CGM.getDataLayout().isBigEndian() &&
5939 BaseTyInfo.first.getQuantity() < 16)
5940 Offset = 16 - BaseTyInfo.first.getQuantity();
5941
5942 for (unsigned i = 0; i < NumMembers; ++i) {
5943 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
5944 Address LoadAddr =
5945 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
5946 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
5947
5948 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
5949
5950 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
5951 CGF.Builder.CreateStore(Elem, StoreAddr);
5952 }
5953
5954 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
5955 } else {
5956 // Otherwise the object is contiguous in memory.
5957
5958 // It might be right-aligned in its slot.
5959 CharUnits SlotSize = BaseAddr.getAlignment();
5960 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
5961 (IsHFA || !isAggregateTypeForABI(Ty)) &&
5962 TySize < SlotSize) {
5963 CharUnits Offset = SlotSize - TySize;
5964 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
5965 }
5966
5967 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
5968 }
5969
5970 CGF.EmitBranch(ContBlock);
5971
5972 //=======================================
5973 // Argument was on the stack
5974 //=======================================
5975 CGF.EmitBlock(OnStackBlock);
5976
5977 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
5978 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
5979
5980 // Again, stack arguments may need realignment. In this case both integer and
5981 // floating-point ones might be affected.
5982 if (!IsIndirect && TyAlign.getQuantity() > 8) {
5983 int Align = TyAlign.getQuantity();
5984
5985 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
5986
5987 OnStackPtr = CGF.Builder.CreateAdd(
5988 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
5989 "align_stack");
5990 OnStackPtr = CGF.Builder.CreateAnd(
5991 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
5992 "align_stack");
5993
5994 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
5995 }
5996 Address OnStackAddr(OnStackPtr,
5997 std::max(CharUnits::fromQuantity(8), TyAlign));
5998
5999 // All stack slots are multiples of 8 bytes.
6000 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
6001 CharUnits StackSize;
6002 if (IsIndirect)
6003 StackSize = StackSlotSize;
6004 else
6005 StackSize = TySize.alignTo(StackSlotSize);
6006
6007 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
6008 llvm::Value *NewStack =
6009 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
6010
6011 // Write the new value of __stack for the next call to va_arg
6012 CGF.Builder.CreateStore(NewStack, stack_p);
6013
6014 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
6015 TySize < StackSlotSize) {
6016 CharUnits Offset = StackSlotSize - TySize;
6017 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
6018 }
6019
6020 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
6021
6022 CGF.EmitBranch(ContBlock);
6023
6024 //=======================================
6025 // Tidy up
6026 //=======================================
6027 CGF.EmitBlock(ContBlock);
6028
6029 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6030 OnStackAddr, OnStackBlock, "vaargs.addr");
6031
6032 if (IsIndirect)
6033 return Address(CGF.Builder.CreateLoad(ResAddr,