#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace CodeGen;
llvm::LLVMContext &LLVMContext) {
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;

for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
  Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);

ByRef, Realign, Padding);
unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;

  if (type->isPointerTy()) {
  } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;

  assert(type->isVectorTy() || type->isFloatingPointTy());

  return (intCount + fpCount > maxAllRegisters);
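// Illustrative count on a 64-bit target (ptrWidth == 64): expanding
// { i8*, i128, float } costs 1 + 2 integer registers plus 1 FP register,
// so a limit of maxAllRegisters == 3 is exceeded and this returns true.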
unsigned numElts) const {

if (UD->hasAttr<TransparentUnionAttr>()) {
  assert(!UD->field_empty() &&
         "sema created an empty transparent union");
uint64_t Members) const {

raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";

OS << "Direct Type=";

OS << "CoerceAndExpand Type=";
PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,

PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,

PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                      Ptr->getName() + ".aligned");
bool AllowHigherAlign) {

if (AllowHigherAlign && DirectAlign > SlotSize) {

!DirectTy->isStructTy()) {

std::pair<CharUnits, CharUnits> ValueInfo,
bool AllowHigherAlign) {

DirectSize = ValueInfo.first;
DirectAlign = ValueInfo.second;

DirectTy = DirectTy->getPointerTo(0);

DirectSize, DirectAlign,
Address Addr1, llvm::BasicBlock *Block1,
Address Addr2, llvm::BasicBlock *Block2,
const llvm::Twine &Name = "") {
return llvm::CallingConv::SPIR_KERNEL;

llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);

"Address space agnostic languages only");

unsigned DestAddr, llvm::Type *DestTy,
bool isNonNull) const {

if (auto *C = dyn_cast<llvm::Constant>(Src))

unsigned SrcAddr, unsigned DestAddr,

return llvm::ConstantExpr::getPointerCast(Src, DestTy);
if (AT->getSize() == 0)

FT = AT->getElementType();

if (isa<CXXRecordDecl>(RT->getDecl()))

if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
  for (const auto &I : CXXRD->bases())

for (const auto *I : RD->fields())

const Type *Found = nullptr;
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
  for (const auto &I : CXXRD->bases()) {

for (const auto *FD : RD->fields()) {

if (AT->getSize().getZExtValue() != 1)

FT = AT->getElementType();

"Unexpected PaddingType seen in arginfo in generic VAArg emitter!");

"Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

return Address(Addr, TyAlignForABI);

"Unexpected ArgInfo Kind in generic VAArg emitter!");

"Unexpected InReg seen in arginfo in generic VAArg emitter!");

"Unexpected PaddingType seen in arginfo in generic VAArg emitter!");

"Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");

"Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
class DefaultABIInfo : public ABIInfo {

return getNaturalAlignIndirect(Ty);

Ty = EnumTy->getDecl()->getIntegerType();

return getNaturalAlignIndirect(RetTy);

RetTy = EnumTy->getDecl()->getIntegerType();

class WebAssemblyABIInfo final : public DefaultABIInfo {
    : DefaultABIInfo(CGT) {}

getContext().getTypeInfoInChars(Ty),

class PNaClABIInfo : public ABIInfo {

return getNaturalAlignIndirect(Ty);

Ty = EnumTy->getDecl()->getIntegerType();

return getNaturalAlignIndirect(RetTy);

RetTy = EnumTy->getDecl()->getIntegerType();
return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
       cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
       IRType->getScalarSizeInBits() != 64;

StringRef Constraint,

if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
  if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {

if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)

if (VecSize == 128 || VecSize == 256 || VecSize == 512)

static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
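// Example: under vectorcall, a struct of four __m128 members is still a
// homogeneous vector aggregate (NumMembers == 4); a fifth vector member
// pushes it past this limit and it is no longer eligible for registers.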
CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

unsigned FreeSSERegs;

VectorcallMaxParamNumAsReg = 6

static const unsigned MinABIStackAlignInBytes = 4;

bool IsDarwinVectorABI;
bool IsRetSmallStructInRegABI;
bool IsWin32StructABI;

unsigned DefaultNumRegisterParameters;

static bool isRegisterSize(unsigned Size) {
  return (Size == 8 || Size == 16 || Size == 32 || Size == 64);

bool isHomogeneousAggregateBaseType(QualType Ty) const override {
  return isX86VectorTypeForVectorCall(getContext(), Ty);

bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                       uint64_t NumMembers) const override {
  return isX86VectorCallAggregateSmallEnough(NumMembers);

unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                              bool &NeedsPadding) const;
bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

bool canExpandIndirectArgument(QualType Ty) const;

bool &UsedInAlloca) const;
bool RetSmallStructInRegABI, bool Win32StructABI,
unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                  bool asReturnValue) const override {

bool isSwiftErrorInRegister() const override {

bool RetSmallStructInRegABI, bool Win32StructABI,
unsigned NumRegisterParameters, bool SoftFloatABI)
      CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters, SoftFloatABI)) {}

static bool isStructReturnInRegABI(

void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

StringRef Constraint,
  return X86AdjustInlineAsmType(CGF, Constraint, Ty);

std::string &Constraints,
std::vector<llvm::Type *> &ResultRegTypes,
std::vector<llvm::Type *> &ResultTruncRegTypes,
std::vector<LValue> &ResultRegDests,
std::string &AsmString,
unsigned NumOutputs) const override;

unsigned Sig = (0xeb << 0) |

return llvm::ConstantInt::get(CGM.Int32Ty, Sig);

StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
  return "movl\t%ebp, %ebp"
         "\t\t## marker for objc_retainAutoreleaseReturnValue";
unsigned NumNewOuts,
std::string &AsmString) {

llvm::raw_string_ostream OS(Buf);

while (Pos < AsmString.size()) {
  size_t DollarStart = AsmString.find('$', Pos);
  if (DollarStart == std::string::npos)
    DollarStart = AsmString.size();
  size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
  if (DollarEnd == std::string::npos)
    DollarEnd = AsmString.size();
  OS << StringRef(&AsmString[Pos], DollarEnd - Pos);

  size_t NumDollars = DollarEnd - DollarStart;
  if (NumDollars % 2 != 0 && Pos < AsmString.size()) {

    size_t DigitStart = Pos;
    size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
    if (DigitEnd == std::string::npos)
      DigitEnd = AsmString.size();
    StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
    unsigned OperandIndex;
    if (!OperandStr.getAsInteger(10, OperandIndex)) {
      if (OperandIndex >= FirstIn)
        OperandIndex += NumNewOuts;

AsmString = std::move(OS.str());
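// Example rewrite with one prepended output (FirstIn == 1, NumNewOuts == 1):
// "mov $1, $0" becomes "mov $2, $0" -- only operand references at or beyond
// FirstIn are shifted, and "$$" escapes are skipped by the odd-dollar test.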
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {

if (!Constraints.empty())

if (RetWidth <= 32) {
  Constraints += "={eax}";
  ResultRegTypes.push_back(CGF.Int32Ty);

  Constraints += "=A";
  ResultRegTypes.push_back(CGF.Int64Ty);

ResultTruncRegTypes.push_back(CoerceTy);

CoerceTy->getPointerTo()));
ResultRegDests.push_back(ReturnSlot);
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,

if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))

if (Size == 64 || Size == 128)

return shouldReturnTypeInRegister(AT->getElementType(), Context);

if (!RT)
  return false;

if (!shouldReturnTypeInRegister(FD->getType(), Context))

Ty = CTy->getElementType();

return Size == 32 || Size == 64;

for (const auto *FD : RD->fields()) {

if (FD->isBitField())

bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {

if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
  if (!IsWin32StructABI) {

  if (!CXXRD->isCLike())

  if (CXXRD->isDynamicClass())

return Size == getContext().getTypeSize(Ty);
if (State.FreeRegs) {

  return getNaturalAlignIndirectInReg(RetTy);

return getNaturalAlignIndirect(RetTy, false);

CCState &State) const {

uint64_t NumElts = 0;
if ((State.CC == llvm::CallingConv::X86_VectorCall ||
     State.CC == llvm::CallingConv::X86_RegCall) &&
    isHomogeneousAggregate(RetTy, Base, NumElts)) {

if (IsDarwinVectorABI) {
  uint64_t Size = getContext().getTypeSize(RetTy);

  llvm::Type::getInt64Ty(getVMContext()), 2));

  if ((Size == 8 || Size == 16 || Size == 32) ||
      (Size == 64 && VT->getNumElements() == 1))

  return getIndirectReturnResult(RetTy, State);

return getIndirectReturnResult(RetTy, State);

return getIndirectReturnResult(RetTy, State);

if (shouldReturnTypeInRegister(RetTy, getContext())) {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
      || SeltTy->hasPointerRepresentation())

  return getIndirectReturnResult(RetTy, State);

RetTy = EnumTy->getDecl()->getIntegerType();

if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
  for (const auto &I : CXXRD->bases())

for (const auto *i : RD->fields()) {
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {

if (Align <= MinABIStackAlignInBytes)

if (!IsDarwinVectorABI) {
  return MinABIStackAlignInBytes;

return MinABIStackAlignInBytes;

CCState &State) const {

if (State.FreeRegs) {
  return getNaturalAlignIndirectInReg(Ty);

return getNaturalAlignIndirect(Ty, false);

unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
if (StackAlign == 0)

bool Realign = TypeAlign > StackAlign;

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {

if (K == BuiltinType::Float || K == BuiltinType::Double)

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)

  if (SizeInRegs > State.FreeRegs) {

  if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)

  State.FreeRegs -= SizeInRegs;
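// Example: with State.FreeRegs == 3, a 64-bit argument (SizeInRegs == 2)
// succeeds and leaves one register free; the next 64-bit argument then
// fails the SizeInRegs > State.FreeRegs test and is not assigned registers.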
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &NeedsPadding) const {

NeedsPadding = false;

if (!updateFreeRegs(Ty, State))

if (State.CC == llvm::CallingConv::X86_FastCall ||
    State.CC == llvm::CallingConv::X86_VectorCall ||
    State.CC == llvm::CallingConv::X86_RegCall) {
  if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
    NeedsPadding = true;

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)

CCState &State) const {

return getIndirectResult(Ty, false, State);

const Type *Base = nullptr;
uint64_t NumElts = 0;
if (State.CC == llvm::CallingConv::X86_RegCall &&
    isHomogeneousAggregate(Ty, Base, NumElts)) {

  if (State.FreeSSERegs >= NumElts) {
    State.FreeSSERegs -= NumElts;

  return getIndirectResult(Ty, false, State);

return getIndirectResult(Ty, true, State);

if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))

llvm::LLVMContext &LLVMContext = getVMContext();
llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
bool NeedsPadding = false;

if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;

llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

if (getContext().getTypeSize(Ty) <= 4 * 32 &&
    (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
  State.CC == llvm::CallingConv::X86_FastCall ||
  State.CC == llvm::CallingConv::X86_VectorCall ||
  State.CC == llvm::CallingConv::X86_RegCall,

return getIndirectResult(Ty, true, State);

if (IsDarwinVectorABI) {
  uint64_t Size = getContext().getTypeSize(Ty);
  if ((Size == 8 || Size == 16 || Size == 32) ||
      (Size == 64 && VT->getNumElements() == 1))

if (IsX86_MMXType(CGT.ConvertType(Ty)))

Ty = EnumTy->getDecl()->getIntegerType();

bool InReg = shouldPrimitiveUseInReg(Ty, State);
void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                                          bool &UsedInAlloca) const {

const Type *Base = nullptr;
uint64_t NumElts = 0;

isHomogeneousAggregate(Ty, Base, NumElts)) {
  if (State.FreeSSERegs >= NumElts) {
    State.FreeSSERegs -= NumElts;

const Type *Base = nullptr;
uint64_t NumElts = 0;

bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts);

if (State.FreeSSERegs >= NumElts) {
  State.FreeSSERegs -= NumElts;
  I.info = getDirectX86Hva();

  I.info = getIndirectResult(Ty, false, State);
} else if (!IsHva) {

else if (State.CC == llvm::CallingConv::X86_FastCall)
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
  State.FreeSSERegs = 6;
else if (State.CC == llvm::CallingConv::X86_RegCall) {
  State.FreeSSERegs = 8;

  State.FreeRegs = DefaultNumRegisterParameters;

if (State.FreeRegs) {

bool UsedInAlloca = false;
if (State.CC == llvm::CallingConv::X86_VectorCall) {
  computeVectorCallArgs(FI, State, UsedInAlloca);

rewriteWithInAlloca(FI);
assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");

FrameFields.push_back(CGT.ConvertTypeForMem(Type));
StackOffset += getContext().getTypeSizeInChars(Type);

StackOffset = FieldEnd.alignTo(FieldAlign);
if (StackOffset != FieldEnd) {
  CharUnits NumBytes = StackOffset - FieldEnd;
  llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
  Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
  FrameFields.push_back(Ty);

llvm_unreachable("invalid enum");

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);

for (; I != E; ++I) {
  addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,

auto TypeInfo = getContext().getTypeInfoInChars(Ty);

getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  assert(Triple.getArch() == llvm::Triple::x86);

switch (Opts.getStructReturnConvention()) {

if (Triple.isOSDarwin() || Triple.isOSIAMCU())

switch (Triple.getOS()) {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
case llvm::Triple::Bitrig:
case llvm::Triple::Win32:

void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);

      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);

    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(

Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  case X86AVXABILevel::AVX512:
  case X86AVXABILevel::AVX:
  llvm_unreachable("Unknown AVXLevel");

static Class merge(Class Accum, Class Field);

void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
              bool isNamedArg) const;

unsigned IROffset, QualType SourceTy,
unsigned SourceOffset) const;

unsigned IROffset, QualType SourceTy,
unsigned SourceOffset) const;

unsigned &neededInt, unsigned &neededSSE,
bool isNamedArg) const;

unsigned &NeededSSE) const;

unsigned &NeededSSE) const;

bool IsIllegalVectorType(QualType Ty) const;
bool honorsRevision0_98() const {
  return !getTarget().getTriple().isOSDarwin();

bool classifyIntegerMMXAsSSE() const {
  if (getCodeGenOpts().getClangABICompat() <=

  const llvm::Triple &Triple = getTarget().getTriple();
  if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
  if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)

bool Has64BitPointers;

Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {

unsigned neededInt, neededSSE;

if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
  return (vectorTy->getBitWidth() > 128);

bool has64BitPointers() const {
  return Has64BitPointers;

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                  bool asReturnValue) const override {

bool isSwiftErrorInRegister() const override {

IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
bool isHomogeneousAggregateBaseType(QualType Ty) const override {
  return isX86VectorTypeForVectorCall(getContext(), Ty);

bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                       uint64_t NumMembers) const override {
  return isX86VectorCallAggregateSmallEnough(NumMembers);

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                  bool asReturnValue) const override {

bool isSwiftErrorInRegister() const override {

bool IsVectorCall, bool IsRegCall) const;

void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
                           bool IsVectorCall, bool IsRegCall) const;

const X86_64ABIInfo &getABIInfo() const {

StringRef Constraint,
  return X86AdjustInlineAsmType(CGF, Constraint, Ty);

bool isNoProtoCallVariadic(const CallArgList &args,

bool HasAVXType = false;
for (CallArgList::const_iterator
       it = args.begin(), ie = args.end(); it != ie; ++it) {
  if (getABIInfo().isPassedUsingAVXType(it->Ty)) {

if (getABIInfo().has64BitPointers())

return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);

class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
    : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
    if (Lib.find(" ") != StringRef::npos)
      Opt += "\"" + Lib.str() + "\"";

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";

  if (!Lib.endswith_lower(".lib"))

  ArgStr += Quote ? "\"" : "";
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
  bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
  unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

static void addStackProbeSizeTargetAttribute(const Decl *D,
                                             llvm::GlobalValue *GV,
  if (D && isa<FunctionDecl>(D)) {
    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("stack-probe-size",

void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);

  addStackProbeSizeTargetAttribute(D, GV, CGM);
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

void getDependentLibraryOption(llvm::StringRef Lib,
  Opt = "/DEFAULTLIB:";
  Opt += qualifyWindowsLibrary(Lib);

void getDetectMismatchOption(llvm::StringRef Name,
                             llvm::StringRef Value,
  Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);

  addStackProbeSizeTargetAttribute(D, GV, CGM);
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,

if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
if (Hi == SSEUp && Lo != SSE)

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
  if (Field == Memory)
  if (Accum == NoClass)
  if (Accum == Integer || Field == Integer)
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
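// Example of the merge lattice: merging INTEGER with SSE yields INTEGER
// (e.g. struct { int i; float f; } packs both fields into one eightbyte,
// which classifies as INTEGER), while any X87/ComplexX87 participant sends
// the whole eightbyte to MEMORY.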
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {

if (k == BuiltinType::Void) {
} else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
} else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
} else if (k == BuiltinType::Float || k == BuiltinType::Double) {
} else if (k == BuiltinType::LongDouble) {
  const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
  if (LDF == &llvm::APFloat::IEEEquad()) {
  } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
  } else if (LDF == &llvm::APFloat::IEEEdouble()) {
    llvm_unreachable("unexpected long double representation!");

classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);

if (Has64BitPointers) {

uint64_t EB_FuncPtr = (OffsetBase) / 64;
uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
if (EB_FuncPtr != EB_ThisAdj) {

uint64_t Size = getContext().getTypeSize(VT);
if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {

  uint64_t EB_Lo = (OffsetBase) / 64;
  uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
} else if (Size == 64) {
  QualType ElementType = VT->getElementType();

  if (!classifyIntegerMMXAsSSE() &&

  if (OffsetBase && OffsetBase != 64)
} else if (Size == 128 ||
           (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
uint64_t Size = getContext().getTypeSize(Ty);
else if (Size <= 128)
} else if (ET == getContext().FloatTy) {
} else if (ET == getContext().DoubleTy) {
} else if (ET == getContext().LongDoubleTy) {
  const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
  if (LDF == &llvm::APFloat::IEEEquad())
  else if (LDF == &llvm::APFloat::x87DoubleExtended())
    Current = ComplexX87;
  else if (LDF == &llvm::APFloat::IEEEdouble())
    llvm_unreachable("unexpected long double representation!");

uint64_t EB_Real = (OffsetBase) / 64;
uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
if (Hi == NoClass && EB_Real != EB_Imag)

uint64_t Size = getContext().getTypeSize(Ty);

if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))

uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();

(Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))

for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
     ++i, Offset += EltSize) {
  Class FieldLo, FieldHi;
  classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
  Lo = merge(Lo, FieldLo);
  Hi = merge(Hi, FieldHi);
  if (Lo == Memory || Hi == Memory)

postMerge(Size, Lo, Hi);
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
uint64_t Size = getContext().getTypeSize(Ty);

if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
  for (const auto &I : CXXRD->bases()) {
    assert(!I.isVirtual() && !I.getType()->isDependentType() &&
           "Unexpected base class!");
    cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    Class FieldLo, FieldHi;
    classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
    Lo = merge(Lo, FieldLo);
    Hi = merge(Hi, FieldHi);
    if (Lo == Memory || Hi == Memory) {
      postMerge(Size, Lo, Hi);

i != e; ++i, ++idx) {
  bool BitField = i->isBitField();

  if (BitField && i->isUnnamedBitfield())

  if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
                     Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
    postMerge(Size, Lo, Hi);

  if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
    postMerge(Size, Lo, Hi);

  Class FieldLo, FieldHi;

  assert(!i->isUnnamedBitfield());

  uint64_t Size = i->getBitWidthValue(getContext());

  uint64_t EB_Lo = Offset / 64;
  uint64_t EB_Hi = (Offset + Size - 1) / 64;

  assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");

  FieldHi = EB_Hi ? Integer : NoClass;

  classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
  Lo = merge(Lo, FieldLo);
  Hi = merge(Hi, FieldHi);
  if (Lo == Memory || Hi == Memory)

postMerge(Size, Lo, Hi);
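// Worked example: struct { double d; long l; } classifies as Lo = SSE
// (eightbyte 0 holds the double) and Hi = Integer (eightbyte 1 holds the
// long), so the SysV ABI passes it in one XMM register and one GPR.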
Ty = EnumTy->getDecl()->getIntegerType();

return getNaturalAlignIndirect(Ty);

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(VecTy);
  unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
  if (Size <= 64 || Size > LargestVector)

unsigned freeIntRegs) const {

Ty = EnumTy->getDecl()->getIntegerType();

unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

if (freeIntRegs == 0) {
  uint64_t Size = getContext().getTypeSize(Ty);

  if (Align == 8 && Size <= 64)

if (isa<llvm::VectorType>(IRType) ||
    IRType->getTypeID() == llvm::Type::FP128TyID)

uint64_t Size = getContext().getTypeSize(Ty);
assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
if (TySize <= StartBit)

unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

for (unsigned i = 0; i != NumElts; ++i) {
  unsigned EltOffset = i * EltSize;
  if (EltOffset >= EndBit)
    break;

  unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;

if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
  for (const auto &I : CXXRD->bases()) {
    assert(!I.isVirtual() && !I.getType()->isDependentType() &&
           "Unexpected base class!");
    cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    if (BaseOffset >= EndBit)
      continue;

    unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;

i != e; ++i, ++idx) {
  if (FieldOffset >= EndBit)
    break;

  unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatTy())

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;

GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  SourceOffset * 8 + 64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  if (IROffset == 0) {
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))

  if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
      IRType->isIntegerTy(32) ||
      (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
    unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
        cast<llvm::IntegerType>(IRType)->getBitWidth();

    SourceOffset * 8 + 64, getContext()))

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,

  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
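// Example: for struct { char c; int i; } at IROffset 0, the i8 element
// cannot stand alone (bytes 4..7 hold user data), so the fallback above
// widens the result to an i64 covering the whole eightbyte; a lone trailing
// char would instead come back as i8 because the remaining bits are padding.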
const llvm::DataLayout &TD) {

unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
unsigned HiAlign = TD.getABITypeAlignment(Hi);
unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

if (Lo->isFloatTy())
  Lo = llvm::Type::getDoubleTy(Lo->getContext());

assert((Lo->isIntegerTy() || Lo->isPointerTy())
       && "Invalid/unknown lo type");
Lo = llvm::Type::getInt64Ty(Lo->getContext());

llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
       "Invalid x86-64 argument pair!");
X86_64ABIInfo::Class Lo, Hi;
classify(RetTy, 0, Lo, Hi, true);

assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
       "Unknown missing lo part");

llvm_unreachable("Invalid classification for lo word.");

return getIndirectReturnResult(RetTy);

ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
  RetTy = EnumTy->getDecl()->getIntegerType();

ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

ResType = llvm::Type::getX86_FP80Ty(getVMContext());

assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                llvm::Type::getX86_FP80Ty(getVMContext()));

llvm_unreachable("Invalid classification for hi word.");

HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

assert(Lo == SSE && "Unexpected SSEUp classification.");
ResType = GetByteVectorType(RetTy);

HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,

X86_64ABIInfo::Class Lo, Hi;
classify(Ty, 0, Lo, Hi, isNamedArg);

assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
       "Unknown missing lo part");

return getIndirectResult(Ty, freeIntRegs);

llvm_unreachable("Invalid classification for lo word.");

ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
  Ty = EnumTy->getDecl()->getIntegerType();

ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);

llvm_unreachable("Invalid classification for hi word.");

case NoClass:
  break;

HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

assert(Lo == SSE && "Unexpected SSEUp classification");
ResType = GetByteVectorType(Ty);
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE) const {
  assert(RT && "classifyRegCallStructType only valid with struct types");

  return getIndirectReturnResult(Ty);

  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

  if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
    if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);

  unsigned LocalNeededInt, LocalNeededSSE;
  LocalNeededSSE, true)
    NeededInt = NeededSSE = 0;
    return getIndirectReturnResult(Ty);

  NeededInt += LocalNeededInt;
  NeededSSE += LocalNeededSSE;
unsigned &NeededInt, unsigned &NeededSSE) const {

return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);

unsigned FreeIntRegs = IsRegCall ? 11 : 6;
unsigned FreeSSERegs = IsRegCall ? 16 : 8;
unsigned NeededInt, NeededSSE;

classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
  FreeIntRegs -= NeededInt;
  FreeSSERegs -= NeededSSE;
} else if (!getCXXABI().classifyReturnType(FI))

it != ie; ++it, ++ArgNo) {
  bool IsNamedArg = ArgNo < NumRequiredArgs;

  if (IsRegCall && it->type->isStructureOrClassType())
    it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);

  NeededSSE, IsNamedArg);

  if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
    FreeIntRegs -= NeededInt;
    FreeSSERegs -= NeededSSE;

    it->info = getIndirectResult(it->type, FreeIntRegs);
llvm::PointerType::getUnqual(LTy));

llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                          "overflow_arg_area.next");

return Address(Res, Align);
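// Example: an 11-byte argument consumes (11 + 7) & ~7 == 16 bytes of the
// overflow area, keeping overflow_arg_area 8-byte aligned for the next
// argument as the SysV x86-64 ABI requires.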
Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,

unsigned neededInt, neededSSE;

if (!neededInt && !neededSSE)

llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;

InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");

llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;

CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

if (neededInt && neededSSE) {
  assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
  assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
  assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
         "Unexpected ABI info for mixed regs");
  llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
  llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);

  llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
  llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
getDataLayout().getStructLayout(ST)->getElementOffset(1));

} else if (neededInt) {
  RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),

  std::pair<CharUnits, CharUnits> SizeAlign =
      getContext().getTypeInfoInChars(Ty);
  uint64_t TySize = SizeAlign.first.getQuantity();

} else if (neededSSE == 1) {
  RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),

  assert(neededSSE == 2 && "Invalid number of needed registers!");

  Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),

  llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy);

Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,

Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,

const Type *Base = nullptr;
uint64_t NumElts = 0;

isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
  FreeSSERegs -= NumElts;
  return getDirectX86Hva();

bool IsReturnType, bool IsVectorCall, bool IsRegCall) const {

Ty = EnumTy->getDecl()->getIntegerType();

TypeInfo Info = getContext().getTypeInfo(Ty);
uint64_t Width = Info.Width;

if (!IsReturnType) {

  return getNaturalAlignIndirect(Ty, false);

const Type *Base = nullptr;
uint64_t NumElts = 0;

if ((IsVectorCall || IsRegCall) &&
    isHomogeneousAggregate(Ty, Base, NumElts)) {
  if (FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
} else if (IsVectorCall) {
  if (FreeSSERegs >= NumElts &&
    FreeSSERegs -= NumElts;
} else if (IsReturnType) {

if (LLTy->isPointerTy() || LLTy->isIntegerTy())

if (Width > 64 || !llvm::isPowerOf2_64(Width))
  return getNaturalAlignIndirect(Ty, false);

if (BT && BT->getKind() == BuiltinType::Bool)

if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
  const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
  if (LDF == &llvm::APFloat::x87DoubleExtended())
unsigned FreeSSERegs,
bool IsRegCall) const {

if (Count < VectorcallMaxParamNumAsReg)
  I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);

unsigned ZeroSSERegsAvail = 0;
I.info = classify(I.type, ZeroSSERegsAvail, false,
                  IsVectorCall, IsRegCall);

I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);

unsigned FreeSSERegs = 0;

} else if (IsRegCall) {

IsVectorCall, IsRegCall);

} else if (IsRegCall) {

computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);

I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);

Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,

bool IsIndirect = false;

uint64_t Width = getContext().getTypeSize(Ty);
IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;

    : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}

PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)

Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
  const unsigned OverflowLimit = 8;

bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;

bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

if (isInt || IsSoftFloatABI) {

if (isI64 || (isF64 && IsSoftFloatABI)) {
  NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
  NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
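// Example: with three GPR slots already used, the add/and pair rounds
// NumRegs up to the next even slot (4) so a 64-bit value occupies an
// aligned register pair; OverflowLimit (8) then decides whether that pair
// still fits in the register save area.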
Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
if (isIndirect) DirectTy = DirectTy->getPointerTo(0);

Address RegSaveAreaPtr =

RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),

if (!(isInt || IsSoftFloatABI)) {

Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,

Builder.CreateAdd(NumRegs,
    Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));

Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

Size = TypeInfo.first.alignTo(OverflowAreaAlign);

Address OverflowAreaAddr =

Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),

if (Align > OverflowAreaAlign) {

Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);

Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,

Result = Address(Builder.CreateLoad(Result, "aggr"),
                 getContext().getTypeAlignInChars(Ty));

llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
class PPC64_SVR4_ABIInfo : public ABIInfo {

static const unsigned GPRBits = 64;

bool IsSoftFloatABI;

bool IsQPXVectorTy(const Type *Ty) const {
  unsigned NumElements = VT->getNumElements();
  if (NumElements == 1)

  if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
    if (getContext().getTypeSize(Ty) <= 256)
  } else if (VT->getElementType()->
                 isSpecificBuiltinType(BuiltinType::Float)) {
    if (getContext().getTypeSize(Ty) <= 128)

bool IsQPXVectorTy(QualType Ty) const {

    : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
      IsSoftFloatABI(SoftFloatABI) {}

bool isPromotableTypeForABI(QualType Ty) const;

bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                       uint64_t Members) const override;

if (IsQPXVectorTy(T) ||
    (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  Ty = EnumTy->getDecl()->getIntegerType();

case BuiltinType::Int:
case BuiltinType::UInt:

Ty = CTy->getElementType();

if (IsQPXVectorTy(Ty)) {
  if (getContext().getTypeSize(Ty) > 128)

const Type *AlignAsType = nullptr;

if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
    getContext().getTypeSize(EltType) == 128) ||
  AlignAsType = EltType;

const Type *Base = nullptr;
uint64_t Members = 0;
if (!AlignAsType && Kind == ELFv2 &&

if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
  if (getContext().getTypeSize(AlignAsType) > 128)
} else if (AlignAsType) {

if (HasQPX && getContext().getTypeAlign(Ty) >= 256)

uint64_t &Members) const {
  uint64_t NElements = AT->getSize().getZExtValue();
  Members *= NElements;

if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
  for (const auto &I : CXXRD->bases()) {
    uint64_t FldMembers;
    Members += FldMembers;

for (const auto *FD : RD->fields()) {
  if (AT->getSize().getZExtValue() == 0)
  FT = AT->getElementType();

  FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)

  uint64_t FldMembers;
  std::max(Members, FldMembers) : Members + FldMembers);

Ty = CT->getElementType();

QualType EltTy = VT->getElementType();
unsigned NumElements =

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  if (BT->getKind() == BuiltinType::Float ||
      BT->getKind() == BuiltinType::Double ||
      BT->getKind() == BuiltinType::LongDouble) {

  if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;

  return Members * NumRegs <= 8;
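// Example (ELFv2): a homogeneous aggregate of eight floats needs
// 8 members x 1 register = 8 registers and just qualifies; a ninth element
// would exceed the eight available parameter registers and disqualify it.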
uint64_t Size = getContext().getTypeSize(Ty);
  return getNaturalAlignIndirect(Ty, false);
else if (Size < 128) {
  llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);

uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

const Type *Base = nullptr;
uint64_t Members = 0;
if (Kind == ELFv2 &&
    isHomogeneousAggregate(Ty, Base, Members)) {
  llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

uint64_t Bits = getContext().getTypeSize(Ty);
if (Bits > 0 && Bits <= 8 * GPRBits) {
  if (Bits <= GPRBits)
    llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));

  uint64_t RegBits = ABIAlign * 8;
  uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
  llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
  CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);

TyAlign > ABIAlign);

return (isPromotableTypeForABI(Ty) ?

uint64_t Size = getContext().getTypeSize(RetTy);
  return getNaturalAlignIndirect(RetTy);
else if (Size < 128) {
  llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);

const Type *Base = nullptr;
uint64_t Members = 0;
if (Kind == ELFv2 &&
    isHomogeneousAggregate(RetTy, Base, Members)) {
  llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

uint64_t Bits = getContext().getTypeSize(RetTy);
if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
  if (Bits > GPRBits) {
    CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
    CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
  llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));

return getNaturalAlignIndirect(RetTy);

return (isPromotableTypeForABI(RetTy) ?

Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.second = getParamTypeAlignment(Ty);

if (EltSize < SlotSize) {
  SlotSize * 2, SlotSize,

Address RealAddr = Addr;
Address ImagAddr = RealAddr;
SlotSize - EltSize);
2 * SlotSize - EltSize);

llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(

ABIKind getABIKind() const { return Kind; }
bool isDarwinPCS() const { return Kind == DarwinPCS; }

bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                       uint64_t Members) const override;

bool isIllegalVectorType(QualType Ty) const;

Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,

Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,

return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
       : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                       : EmitAAPCSVAArg(VAListAddr, Ty, CGF);

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                  bool asReturnValue) const override {

bool isSwiftErrorInRegister() const override {

unsigned elts) const override;

StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
  return "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue";

bool doesReturnSlotInterfereWithArgs() const override { return false; }
if (isIllegalVectorType(Ty)) {
  uint64_t Size = getContext().getTypeSize(Ty);

  if (isAndroid() && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());

  llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());

  llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);

  llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);

  return getNaturalAlignIndirect(Ty, false);

Ty = EnumTy->getDecl()->getIntegerType();

return getNaturalAlignIndirect(Ty, RAA ==

uint64_t Size = getContext().getTypeSize(Ty);

if (IsEmpty || Size == 0) {
  if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())

  if (IsEmpty && Size == 0)

const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(Ty, Base, Members)) {
  llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

if (getTarget().isRenderScriptTarget()) {

unsigned Alignment = getContext().getTypeAlign(Ty);
Size = llvm::alignTo(Size, 64);

if (Alignment < 128 && Size == 128) {
  llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());

return getNaturalAlignIndirect(Ty, false);

if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
  return getNaturalAlignIndirect(RetTy);

RetTy = EnumTy->getDecl()->getIntegerType();

uint64_t Size = getContext().getTypeSize(RetTy);

const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(RetTy, Base, Members))

if (getTarget().isRenderScriptTarget()) {

unsigned Alignment = getContext().getTypeAlign(RetTy);
Size = llvm::alignTo(Size, 64);

if (Alignment < 128 && Size == 128) {
  llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());

return getNaturalAlignIndirect(RetTy);

bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  unsigned NumElements = VT->getNumElements();
  uint64_t Size = getContext().getTypeSize(VT);

  if (!llvm::isPowerOf2_32(NumElements))

  return Size != 64 && (Size != 128 || NumElements == 1);
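// Example: <3 x i32> (non-power-of-2 element count) and a 128-bit
// single-element vector are both rejected here, while <2 x i32> and
// <4 x i32> map cleanly onto 64- and 128-bit NEON registers.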
bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  unsigned VecSize = getContext().getTypeSize(VT);
  if (VecSize == 64 || VecSize == 128)

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
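// Example: struct { double d[4]; } is a homogeneous aggregate with
// Members == 4 and fits in d0-d3; a fifth double would exceed the AAPCS64
// four-register limit and the aggregate would no longer qualify as an HFA.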
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,

BaseTy = llvm::PointerType::getUnqual(BaseTy);

unsigned NumRegs = 1;
if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
  BaseTy = ArrTy->getElementType();
  NumRegs = ArrTy->getNumElements();

bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

auto TyInfo = getContext().getTypeInfoInChars(Ty);

int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();

RegSize = llvm::alignTo(RegSize, 8);

RegSize = 16 * NumRegs;

UsingStack = CGF.Builder.CreateICmpSGE(
    reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
  reg_offs = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
  reg_offs = CGF.Builder.CreateAnd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),

NewOffset = CGF.Builder.CreateAdd(
    reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");

InRegs = CGF.Builder.CreateICmpSLE(
    NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

reg_top_offset, "reg_top_p");

Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
MemTy = llvm::PointerType::getUnqual(MemTy);

const Type *Base = nullptr;
uint64_t NumMembers = 0;
bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
if (IsHFA && NumMembers > 1) {

  assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
  auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));

  llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);

  std::max(TyAlign, BaseTyInfo.second));

  BaseTyInfo.first.getQuantity() < 16)
    Offset = 16 - BaseTyInfo.first.getQuantity();

  for (unsigned i = 0; i < NumMembers; ++i) {

CharUnits SlotSize = BaseAddr.getAlignment();

TyInfo.first < SlotSize) {
  CharUnits Offset = SlotSize - TyInfo.first;

OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

OnStackPtr = CGF.Builder.CreateAdd(
    OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
OnStackPtr = CGF.Builder.CreateAnd(
    OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),

Address OnStackAddr(OnStackPtr,

StackSize = StackSlotSize;

StackSize = TyInfo.first.alignTo(StackSlotSize);

CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

TyInfo.first < StackSlotSize) {
  CharUnits Offset = StackSlotSize - TyInfo.first;

Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                               OnStackAddr, OnStackBlock, "vaargs.addr");

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,

auto TyInfo = getContext().getTypeInfoInChars(Ty);

bool IsIndirect = false;
if (TyInfo.first.getQuantity() > 16) {
  const Type *Base = nullptr;
  uint64_t Members = 0;
  IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);

TyInfo, SlotSize, true);

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
bool isEABI() const {
  switch (getTarget().getTriple().getEnvironment()) {
  case llvm::Triple::Android:
  case llvm::Triple::EABI:
  case llvm::Triple::EABIHF:
  case llvm::Triple::GNUEABI:
  case llvm::Triple::GNUEABIHF:
  case llvm::Triple::MuslEABI:
  case llvm::Triple::MuslEABIHF:

bool isEABIHF() const {
  switch (getTarget().getTriple().getEnvironment()) {
  case llvm::Triple::EABIHF:
  case llvm::Triple::GNUEABIHF:
  case llvm::Triple::MuslEABIHF:

ABIKind getABIKind() const { return Kind; }

bool isIllegalVectorType(QualType Ty) const;

bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                       uint64_t Members) const override;

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                  bool asReturnValue) const override {

bool isSwiftErrorInRegister() const override {

unsigned elts) const override;

ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)

const ARMABIInfo &getABIInfo() const {

StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
  return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";

unsigned getSizeOfUnwindException() const override {
  if (getABIInfo().isEABI())
    return 88;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    // ...
    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    // ...
    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = "";      break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ";   break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ";   break;
    case ARMInterruptAttr::SWI:     Kind = "SWI";   break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }
    // ...
    llvm::Function *Fn = cast<llvm::Function>(GV);
    // ...
    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;
    // ...
    // AAPCS guarantees 8-byte stack alignment only at public interfaces;
    // an interrupt can arrive with any alignment, so force realignment.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }
};
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS:        return llvm::CallingConv::ARM_APCS;
  case AAPCS:       return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP:   return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}
void ARMABIInfo::setCCs() {
  // ...
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
  // ...
  if (abiCC != getLLVMDefaultCC())
    BuiltinCC = abiCC;
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            bool isVariadic) const {
  // Variadic functions always marshal to the base AAPCS standard.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
  // ...
  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  // __fp16 is passed as if it were an int or float.
  if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsEffectivelyAAPCS_VFP
                              ? llvm::Type::getFloatTy(getVMContext())
                              : llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  // ...
  // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous aggregates are expanded when they fit in VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // ...
    }
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // ...
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      // ...
    }
  }

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP /* && ... */) {
    // ...
  }
  // ...
  // Support byval for ARM.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    // ...
  // ...
  assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                 /*ByVal=*/true,
                                 /*Realign=*/TyAlign > ABIAlign);
  // ...
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }
  // ...
  // Otherwise, pass by coercing to an integer array of the right width.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }
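// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): the final
// coercion above turns an aggregate into [N x i32] or [N x i64] so the
// backend can split it across core registers. The element width follows the
// type's alignment, and N is a ceiling division by that width. The names
// below are made up for the example:

#include <cstdint>

struct CoercedShape { unsigned elemBits, count; };

static CoercedShape coerceAggregateShape(uint64_t sizeBits,
                                         uint64_t alignBits) {
  if (alignBits <= 32)
    return {32, unsigned((sizeBits + 31) / 32)};   // [N x i32]
  return {64, unsigned((sizeBits + 63) / 64)};     // [N x i64]
}
// e.g. a 12-byte struct with 4-byte alignment -> {32, 3}, i.e. [3 x i32].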
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // ...
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  // ...
  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    // ...
  }
  // ...
}
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP =
      (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
  // ...
  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    return getNaturalAlignIndirect(RetTy);
  }
  // ...
  // __fp16 is returned as if it were an int or float.
  if (RetTy->isHalfType() &&
      !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsEffectivelyAAPCS_VFP
                              ? llvm::Type::getFloatTy(getVMContext())
                              : llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  // ...
  // Treat an enum type as its underlying type.
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
  // Are we following APCS?
  if (getABIKind() == APCS) {
    // ...
    return ABIArgInfo::getDirect(llvm::IntegerType::get(
        getVMContext(), getContext().getTypeSize(RetTy)));
    // ...
    uint64_t Size = getContext().getTypeSize(RetTy);
    // ...
    return getNaturalAlignIndirect(RetTy);
  }
  // ...
  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // ...
    }
  }
  // ...
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getTarget().isRenderScriptTarget()) {
      // ...
    }
    if (getDataLayout().isBigEndian())
      // ...
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // (Android legacy vector ABI path:)
    // ...
    unsigned NumElements = VT->getNumElements();
    // NumElements should be a power of 2 or equal to 3.
    if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
      return true;
    // (otherwise:)
    // ...
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;
    // ...
  }
  return false;
}

bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  // ...
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
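// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): the two
// predicates above combined. For AAPCS-VFP a homogeneous aggregate has at
// most four members whose common base type is float, double, long double,
// or a 64/128-bit vector. The enum and function names are made up:

#include <cstdint>

enum class HFABase { Float, Double, LongDouble, Vec64, Vec128, Other };

static bool isARMHomogeneousAggregate(HFABase base, uint64_t members) {
  if (members == 0 || members > 4)   // isHomogeneousAggregateSmallEnough
    return false;
  return base != HFABase::Other;     // isHomogeneousAggregateBaseType
}
// e.g. struct { float x, y, z; } -> (Float, 3) -> true;
//      struct { float x; int i; } has no common base type -> false.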
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  // ...
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  CharUnits TyAlignForABI = TyInfo.second;
  // ...
  // ARMv7k (AAPCS16_VFP) passes non-homogeneous aggregates bigger than
  // 16 bytes indirectly.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TyInfo.first > CharUnits::fromQuantity(16) &&
      getABIKind() == ARMABIInfo::AAPCS16_VFP &&
      !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;
    // ...
  } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
             getABIKind() == ARMABIInfo::AAPCS) {
    // ...
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // ...
  }
  TyInfo.second = TyAlignForABI;
  // ...
}
class NVPTXABIInfo : public ABIInfo {
  // ...
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
  // ...
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  // ...
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

// In classifyReturnType / classifyArgumentType (fragments):
  // Treat enum types as their underlying type; pass aggregates indirectly.
  // ...
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
  // ...

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  // ...
  llvm::Function *F = cast<llvm::Function>(GV);
  // ...
  if (FD->hasAttr<OpenCLKernelAttr>()) {
    // OpenCL __kernel functions get kernel metadata.
    addNVVMMetadata(F, "kernel", 1);
    // And kernel functions are not subject to inlining.
    F->addFnAttr(llvm::Attribute::NoInline);
  }
  // ...
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    // __global__ functions are kernels too.
    addNVVMMetadata(F, "kernel", 1);
  }
  if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
    // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
    llvm::APSInt MaxThreads(32);
    MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
    if (MaxThreads > 0)
      addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

    // min blocks is an optional argument.
    if (Attr->getMinBlocks()) {
      llvm::APSInt MinBlocks(32);
      MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
      if (MinBlocks > 0)
        addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
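// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): the same
// annotation built against a standalone module, to show the resulting IR.
// The function name "my_kernel" and helper name are made up; the LLVM calls
// used are the same stable APIs used above.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"

static void annotateAsKernel(llvm::Module &M, llvm::Function *F) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
  llvm::Metadata *Vals[] = {
      llvm::ConstantAsMetadata::get(F),
      llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  MD->addOperand(llvm::MDNode::get(Ctx, Vals));
  // Resulting IR:
  //   !nvvm.annotations = !{!0}
  //   !0 = !{void ()* @my_kernel, !"kernel", i32 1}
}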
  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  // ...
  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    // ...
  }
  bool isSwiftErrorInRegister() const override {
    // ...
  }
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  switch (BT->getKind()) {
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return true;
  // ...
  }
  // ...
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  // ...
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  // ...
  switch (BT->getKind()) {
  case BuiltinType::Float:
  case BuiltinType::Double:
    return true;
  // ...
  }
  // ...
}

QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  // ...
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases()) {
      // ...
      Found = GetSingleElementType(Base);
      // ...
    }

  // Check the fields.
  for (const auto *FD : RD->fields()) {
    // Zero-length bitfields are ignored in C++.
    if (getContext().getLangOpts().CPlusPlus &&
        FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
      continue;
    // ...
    Found = GetSingleElementType(FD->getType());
  }
  // ...
}
Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // ...
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  // ...
  bool InFPRs = false;
  bool IsVector = false;
  // ...
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    // ...
  } else {
    // ...
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.first;
    DirectAlign = TyInfo.second;
  }
  // ...
  if (IsVector && UnpaddedSize > PaddedSize)
    PaddedSize = UnpaddedSize;
  assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");

  CharUnits Padding = (PaddedSize - UnpaddedSize);
  // ...
  llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Vector arguments are always passed via the overflow (stack) area.
    Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
        VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
    Address OverflowArgArea =
        Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr,
                                       "overflow_arg_area"),
                TyInfo.second);
    // ...
  }
  // ...
  unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    // ...
  } else {
    // ...
    RegPadding = Padding;
  }
  // ...
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  // ...
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);
  // Work out the address of an argument register.
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase = llvm::ConstantInt::get(
      IndexTy,
      RegSaveIndex * PaddedSize.getQuantity() + RegPadding.getQuantity());
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  Address RegSaveAreaPtr = CGF.Builder.CreateStructGEP(
      VAListAddr, 3, CharUnits::fromQuantity(24), "reg_save_area_ptr");
  // ...
  Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
                                           "raw_reg_addr"),
                     PaddedSize);
  // ...
  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  // ...

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);
  // ...
  Address OverflowArgArea =
      Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr,
                                     "overflow_arg_area"),
              PaddedSize);
  Address RawMemAddr =
      CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
  // ...

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 MemAddr, InMemBlock, "va_arg.addr");
  // ...
}
// In SystemZABIInfo::classifyReturnType:
  if (isVectorArgumentType(RetTy))
    return ABIArgInfo::getDirect();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);
  return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend()
                                         : ABIArgInfo::getDirect());

// In SystemZABIInfo::classifyArgumentType:
  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend();
  // ...
  // Handle vector-like structure types. No padding is allowed, so the
  // sizes must match.
  uint64_t Size = getContext().getTypeSize(Ty);
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Handle small structures.
  // ...
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    // ...
    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    // ...

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void MSP430TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle the 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set the ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Disable inlining.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit the ISR vector alias.
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  // ...
  llvm::Type *getPaddingType(uint64_t Align, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32)
      : ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
        StackAlignInBytes(IsO32 ? 8 : 16) {}
  // ...
  bool shouldSignExtUnsignedType(QualType Ty) const override;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;

public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}
  // ...
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    // ...
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    } else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }

    if (FD->hasAttr<MicroMipsAttr>())
      Fn->addFnAttr("micromips");
    else if (FD->hasAttr<NoMicroMipsAttr>())
      Fn->addFnAttr("nomicromips");

    const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
    // ...
    const char *Kind;
    switch (Attr->getInterrupt()) {
    case MipsInterruptAttr::eic: Kind = "eic"; break;
    case MipsInterruptAttr::sw0: Kind = "sw0"; break;
    case MipsInterruptAttr::sw1: Kind = "sw1"; break;
    case MipsInterruptAttr::hw0: Kind = "hw0"; break;
    case MipsInterruptAttr::hw1: Kind = "hw1"; break;
    case MipsInterruptAttr::hw2: Kind = "hw2"; break;
    case MipsInterruptAttr::hw3: Kind = "hw3"; break;
    case MipsInterruptAttr::hw4: Kind = "hw4"; break;
    case MipsInterruptAttr::hw5: Kind = "hw5"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
    // ...
  }

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type covering the remainder.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);
  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In MipsABIInfo::HandleAggregates (fragments):
  // ...
  CoerceToIntArgs(TySize, ArgList);
  return llvm::StructType::get(getVMContext(), ArgList);
  // ...
  return CGT.ConvertType(Ty);
  // ...
  CoerceToIntArgs(TySize, ArgList);
  return llvm::StructType::get(getVMContext(), ArgList);
  // ...
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Look for aligned double fields and keep them as doubles.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const BuiltinType *BT = i->getType()->getAs<BuiltinType>();
    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;
    // ...
    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);
    // ...
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
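// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): getPaddingType()
// in plain arithmetic. A padding member is only materialized when aligning
// the argument skipped at least one minimum stack slot; its width is the
// number of skipped bytes, in bits. The helper name is made up:

#include <cstdint>

// Returns the padding width in bits (0 means "no padding member").
static uint64_t mipsPaddingBits(uint64_t origOffset, uint64_t alignedOffset,
                                uint64_t minSlotBytes) {
  if (origOffset + minSlotBytes > alignedOffset)
    return 0;
  return (alignedOffset - origOffset) * 8;
}
// e.g. on O32 (4-byte slots), an 8-byte-aligned argument whose natural
// offset was 4 is placed at 8: mipsPaddingBits(4, 8, 4) == 32, i.e. one
// i32 of padding; an argument that needed no realignment yields 0.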
// In MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset):
  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::alignTo(Offset, Align);
  Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
  // ...
    Offset = OrigOffset + MinABIStackAlignInBytes;
  // ...
  // Aggregates are passed directly by coercing to another structure type;
  // padding is inserted if the offset of the aggregate is unaligned.
    ABIArgInfo ArgInfo =
        ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                              getPaddingType(OrigOffset, CurrOffset));
  // ...
  // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));

// In MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size):
  // ...
  for (; b != e; ++b) {
    // ...
    RTList.push_back(CGT.ConvertType(b->getType()));
    // ...
  }
  // ...
  return llvm::StructType::get(getVMContext(), RTList,
                               /*isPacked=*/false);
  // ...
  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);

// In MipsABIInfo::classifyReturnType:
  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...
  // O32 doesn't treat zero-sized structs differently; N32/N64 ignores them.
  if (!IsO32 && Size == 0)
    return ABIArgInfo::getIgnore();
  // ...
  return getNaturalAlignIndirect(RetTy);
  // ...
  // Treat an enum type as its underlying type.
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...

// In MipsABIInfo::computeInfo:
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
  // ...

Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType OrigTy) const {
  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  bool DidPromote = false;
  if ((Ty->isIntegerType() &&
       getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  // ...
  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                                  TyInfo, ArgSlotSize,
                                  /*AllowHigherAlign=*/true);

  // If there was a promotion, "extend" the value back to its original type.
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    // ...
  }
  // ...
}

bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // N32/N64 require unsigned 32-bit integers to be sign extended.
  // ...
}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    // ...
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
      : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  // ...
  llvm::Function *F = cast<llvm::Function>(GV);
  // ...
  if (FD->hasAttr<OpenCLKernelAttr>()) {
    // ...
    F->addFnAttr(llvm::Attribute::NoInline);
    const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
    if (Attr) {
      // Convert the reqd_work_group_size() attributes to metadata.
      llvm::LLVMContext &Context = F->getContext();
      llvm::NamedMDNode *OpenCLMetadata =
          M.getModule().getOrInsertNamedMetadata(
              "opencl.kernel_wg_size_info");

      SmallVector<llvm::Metadata *, 5> Operands;
      Operands.push_back(llvm::ConstantAsMetadata::get(F));

      Operands.push_back(
          llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
              M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
      Operands.push_back(
          llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
              M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
      Operands.push_back(
          llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
              M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

      // Add a boolean constant operand: true for "required", false for
      // "hint" (to support work_group_size_hint later).
      Operands.push_back(
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
      OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
    }
  }
}
class HexagonABIInfo : public ABIInfo {
  // ...
};

// In HexagonABIInfo::classifyArgumentType (fragments):
  // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  uint64_t Size = getContext().getTypeSize(Ty);
  // Aggregates that do not fit in registers are passed indirectly (byval).
  return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

// In HexagonABIInfo::classifyReturnType (fragments):
  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);
  // ...
  // Treat an enum type as its underlying type.
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);

Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // ...
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
class LanaiABIInfo : public DefaultABIInfo {
  // ...
  bool shouldUseInReg(QualType Ty, CCState &State) const;
  // ...
};

bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    // ...
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  // ...
  if (State.FreeRegs) {
    // ...
    return getNaturalAlignIndirectInReg(Ty);
  }
  return getNaturalAlignIndirect(Ty, false);
  // ...
  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}

// In LanaiABIInfo::classifyArgumentType(QualType Ty, CCState &State):
  // ...
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  // ...
  return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
  // ...
    return getIndirectResult(Ty, /*ByVal=*/true, State);
  // ...
  llvm::LLVMContext &LLVMContext = getVMContext();
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs <= State.FreeRegs) {
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    // ...
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
    State.FreeRegs -= SizeInRegs;
    return ABIArgInfo::getDirectInReg(Result);
  }
  // ...
  return getIndirectResult(Ty, /*ByVal=*/true, State);
  // ...
  // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);
  // ...
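// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): shouldUseInReg()'s
// accounting in standalone form. An argument occupies ceil(size/32)
// registers and is passed in registers only if that many remain. Whether a
// spilled argument exhausts the remaining pool (as X86-32 does) is elided
// above, so that line is an assumption here:

#include <cstdint>

static bool lanaiShouldUseInReg(uint64_t sizeBits, unsigned &freeRegs) {
  unsigned sizeInRegs = unsigned((sizeBits + 31) / 32); // alignTo(Size,32)/32
  if (sizeInRegs == 0)
    return false;
  if (sizeInRegs > freeRegs) {
    freeRegs = 0; // assumption: spilling consumes the remainder
    return false;
  }
  freeRegs -= sizeInRegs;
  return true;
}
// e.g. with 4 free registers, a 64-bit value takes 2 of them; a following
// 96-bit value (3 registers) still fits; anything more spills.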
class AMDGPUABIInfo final : public DefaultABIInfo {
  // ...
};

// Kernel argument handling (fragments):
  // ...
  if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
    // ...
  }
  // ...
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  // ...
  // Coerce single-element structs to their element type.
  if (StrTy->getNumElements() == 1) {
    // ...
  }

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  // ...
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  unsigned getASTAllocaAddressSpace() const override {
    return LangAS::FirstTargetAddressSpace +
           getABIInfo().getDataLayout().getAllocaAddrSpace();
  }
  unsigned getGlobalVarAddressSpace(CodeGenModule &CGM,
                                    const VarDecl *D) const override;
};

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  // ...
  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
    unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min = Attr->getMin();
    unsigned Max = Attr->getMax();

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  // ...
    return llvm::ConstantPointerNull::get(PT);
  // ...
  auto NPT = llvm::PointerType::get(PT->getElementType(),
                                    /* generic address space */ NPTAddrSpace);
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

unsigned AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
    CodeGenModule &CGM, const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  unsigned DefaultGlobalAS = /* target global AS */ 0;
  // ...
    return DefaultGlobalAS;
  // ...
    return ConstAS.getValue();
  // ...
  return DefaultGlobalAS;
}
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  // ...
};
// ...
class SparcV9ABIInfo : public ABIInfo {
  // ...
};
// ...
// Collects the elements of the coercion type for a struct passed in
// registers on SPARC v9.
struct CoerceBuilder {
  llvm::LLVMContext &Context;
  const llvm::DataLayout &DL;
  SmallVector<llvm::Type *, 8> Elems;
  uint64_t Size;
  bool InReg;

  CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

  // Pad Elems with integers until Size is ToSize.
  void pad(uint64_t ToSize) {
    assert(ToSize >= Size && "Cannot remove elements");
    // ...
    // Finish the current 64-bit word.
    uint64_t Aligned = llvm::alignTo(Size, 64);
    if (Aligned > Size && Aligned <= ToSize) {
      Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
      Size = Aligned;
    }

    // Add whole 64-bit words.
    while (Size + 64 <= ToSize) {
      Elems.push_back(llvm::Type::getInt64Ty(Context));
      Size += 64;
    }

    // Final in-word padding.
    if (Size < ToSize) {
      Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
      Size = ToSize;
    }
  }

  // Add a floating point element at Offset.
  void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
    // ...
    Elems.push_back(Ty);
    Size = Offset + Bits;
  }

  // Add a struct type to the coercion type, starting at Offset (in bits).
  void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
    const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
    for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
      llvm::Type *ElemTy = StrTy->getElementType(i);
      uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
      switch (ElemTy->getTypeID()) {
      case llvm::Type::StructTyID:
        addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
        break;
      case llvm::Type::FloatTyID:
        addFloat(ElemOffset, ElemTy, 32);
        break;
      case llvm::Type::DoubleTyID:
        addFloat(ElemOffset, ElemTy, 64);
        break;
      case llvm::Type::FP128TyID:
        addFloat(ElemOffset, ElemTy, 128);
        break;
      case llvm::Type::PointerTyID:
        if (ElemOffset % 64 == 0) {
          // ...
          Elems.push_back(ElemTy);
          // ...
        }
        break;
      default:
        break;
      }
    }
  }

  // Check if Ty is a usable substitute for the coercion type.
  bool isUsableType(llvm::StructType *Ty) const {
    return llvm::makeArrayRef(Elems) == Ty->elements();
  }

  // Get the coercion type as a literal struct type.
  llvm::Type *getType() const {
    if (Elems.size() == 1)
      return Elems.front();
    // ...
    return llvm::StructType::get(Context, Elems);
  }
};

// In SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit):
  uint64_t Size = getContext().getTypeSize(Ty);
  // ...
  // Anything too big to fit in registers is passed with an explicit
  // indirect pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();
  // ...
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  // ...
  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
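// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): the arithmetic of
// CoerceBuilder::pad() without any LLVM types. It fills to the next 64-bit
// boundary, then whole 64-bit words, then a final partial integer, so every
// later element starts at a known bit offset. The helper name is made up:

#include <cstdint>
#include <vector>

// Returns the integer widths (in bits) needed to pad from 'size' to 'toSize'.
static std::vector<unsigned> padWidths(uint64_t size, uint64_t toSize) {
  std::vector<unsigned> elems;
  uint64_t aligned = (size + 63) / 64 * 64;      // llvm::alignTo(Size, 64)
  if (aligned > size && aligned <= toSize) {
    elems.push_back(unsigned(aligned - size));   // finish the current word
    size = aligned;
  }
  while (size + 64 <= toSize) {                  // whole 64-bit words
    elems.push_back(64);
    size += 64;
  }
  if (size < toSize)                             // trailing partial integer
    elems.push_back(unsigned(toSize - size));
  return elems;
}
// e.g. padWidths(32, 192) -> {32, 64, 64}: an i32 to finish the first word,
// then two i64 words.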
Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // ...
  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  // ...
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  // ...
  switch (AI.getKind()) {
  // ...
    llvm_unreachable("Unsupported ABI kind for va_arg");
  // ...
  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    // ...
    break;
  }
  case ABIArgInfo::Indirect:
    // ...
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;
  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }
  // ...
  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}

// In SparcV9TargetCodeGenInfo's DWARF EH register-size table setup:
  // ...
  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
/// TypeStringCache caches the meta encodings of types.
class TypeStringCache {
  enum Status { NonRecursive, Recursive, Incomplete, IncompleteUsed };
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary holder for a Recursive encoding
                         // during the expansion of a RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.

public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  // ...
};

/// FieldEncoding pairs a field's encoding with whether the field is named,
/// so encodings can be ordered deterministically.
class FieldEncoding {
  bool HasName;
  SmallStringEnc Enc;

public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName)
      return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
  // ...
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;

public:
  // ...
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  // ...
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  // ...
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  // ...
  switch (AI.getKind()) {
  // ...
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    // ...
    break;
  case ABIArgInfo::Direct:
    // ...
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    // ...
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    // ...
    break;
  }
  // ...
}
/// Associate the encoding of a stub (StubEnc) with the given identifier
/// while its RecordType is being expanded.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  // ...
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrectly use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // Swap out any Recursive encoding.
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  // ...
}

/// Once the RecordType has been expanded, the temporary incomplete
/// TypeString is removed; a swapped-out Recursive encoding is restored.
/// Returns true if the RecordType was found to be recursive.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  // ...
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive encoding back in.
    E.Swapped.swap(E.Str);
    // ...
    E.State = Recursive;
  }
  // ...
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key, or an incomplete sub-type is in flight; don't add.
  // ...
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  // ...
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for ID, or an empty StringRef if
/// there is none (or it cannot be used while a record is being expanded).
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  // ...
  auto I = Map.find(ID);
  // ...
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // Recursive encodings are unusable for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  // ...
}
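// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): the entry state
// machine used by TypeStringCache above. An entry becomes Incomplete while
// its record is being encoded, IncompleteUsed if some field refers back to
// it (recursion detected), and settles as Recursive or NonRecursive once
// encoding finishes. The names below are made up:

enum class EntryState { NonRecursive, Incomplete, IncompleteUsed, Recursive };

// lookupStr()'s effect on an in-flight entry, reduced to the state change.
static bool markUseDuringEncoding(EntryState &s) {
  if (s == EntryState::Incomplete) {
    s = EntryState::IncompleteUsed;   // a self-reference was observed
    return true;
  }
  return false;
}
// removeIncomplete() then reports IncompleteUsed as "recursive", which is
// what addIfComplete() records for future lookups of that identifier.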
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
  unsigned getOpenCLKernelCallingConv() const override;
};
// ...
// SPIR kernels use the default ABI:
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
// ...
unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType(): collects the encoded field types
/// in declaration order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    // ...
    Enc += Field->getName();
    // ...
    if (Field->isBitField()) {
      // ...
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      // ...
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      // ...
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds the encoding to the
/// cache.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }
  // ...
  size_t Start = Enc.size();
  // ...
  bool IsRecursive = false;
  // ...
    // An incomplete TypeString stub is placed in the cache so that recursive
    // references to this RecordType use it while the complete TypeString is
    // still being built.
    std::string StubEnc(Enc.substr(Start).str());
    // ...
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void)TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // ...
    std::sort(FE.begin(), FE.end());
    // ...
  unsigned E = FE.size();
  for (unsigned I = 0; I != E; ++I) {
    // ...
  }
  // ...
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}

/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  // ...
  for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
       ++I) {
    SmallStringEnc EnumEnc;
    // ...
    EnumEnc += I->getName();
    // ...
    I->getInitVal().toString(EnumEnc);
    // ...
    FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
  }
  std::sort(FE.begin(), FE.end());
  unsigned E = FE.size();
  for (unsigned I = 0; I != E; ++I) {
    // ...
  }
  // ...
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
/// Appends the type's qualifier to Enc, prior to the type's encoding.
/// Qualifiers are emitted in alphabetical order.
  static const char *const Table[] = {"",   "c:",  "r:",  "cr:",
                                      "v:", "cv:", "rv:", "crv:"};
  // ...
  Enc += Table[Lookup];
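// --------------------------------------------------------------------------
// Illustrative sketch (added; not from the original file): how the table
// above is indexed. The three qualifier bits form a value in [0, 7]; with
// const as bit 0, restrict as bit 1 and volatile as bit 2, Table[Lookup]
// yields the matching prefix. The function name is made up:

static const char *qualifierPrefix(bool isConst, bool isRestrict,
                                   bool isVolatile) {
  static const char *const Table[] = {"",   "c:",  "r:",  "cr:",
                                      "v:", "cv:", "rv:", "crv:"};
  unsigned lookup = unsigned(isConst) | unsigned(isRestrict) << 1 |
                    unsigned(isVolatile) << 2;
  return Table[lookup];
}
// e.g. qualifierPrefix(true, false, true) == "cv:".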
/// Appends built-in types to Enc.
  const char *EncType;
  switch (BT->getKind()) {
  // Each case sets EncType to that type's XCore encoding and breaks:
  case BuiltinType::Void:       /* ... */ break;
  case BuiltinType::Bool:       /* ... */ break;
  case BuiltinType::Char_U:     /* ... */ break;
  case BuiltinType::UChar:      /* ... */ break;
  case BuiltinType::SChar:      /* ... */ break;
  case BuiltinType::UShort:     /* ... */ break;
  case BuiltinType::Short:      /* ... */ break;
  case BuiltinType::UInt:       /* ... */ break;
  case BuiltinType::Int:        /* ... */ break;
  case BuiltinType::ULong:      /* ... */ break;
  case BuiltinType::Long:       /* ... */ break;
  case BuiltinType::ULongLong:  /* ... */ break;
  case BuiltinType::LongLong:   /* ... */ break;
  case BuiltinType::Float:      /* ... */ break;
  case BuiltinType::Double:     /* ... */ break;
  case BuiltinType::LongDouble: /* ... */ break;
  // ...
  }

/// Appends a pointer encoding to Enc before calling appendType for the
/// pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  // ...
}

/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  // ...
    CAT->getSize().toStringUnsigned(Enc);
  // ...
}

/// Appends a function encoding to Enc, calling appendType for the return
/// type and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  // ...
  auto I = FPT->param_type_begin();
  auto E = FPT->param_type_end();
  // ...
  if (FPT->isVariadic())
    // ...
  // ...
  if (FPT->isVariadic())
    // ...
  // ...
}
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  // ...
}

/// The XCore ABI includes a type information section that communicates
/// symbol type information to the linker, which uses it to verify safety
/// and correctness of array bounds, pointers, and the like.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  // ...
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // ...
  }
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    QualType QT = VD->getType().getCanonicalType();
    // ...
  }
  return false;
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique TheTargetCodeGenInfo member.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };
  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, /*IsO32=*/true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, /*IsO32=*/false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      Kind = AArch64ABIInfo::Win64;

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}
unsigned getAddressSpace() const
Return the address space of this type.
Ignore - Ignore the argument (treat as void).
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
void setEffectiveCallingConvention(unsigned Value)
External linkage, which indicates that the entity can be referred to from other translation units...
static ABIArgInfo getExtend(llvm::Type *T=nullptr)
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type * > scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
PointerType - C99 6.7.5.1 - Pointer Declarators.
A (possibly-)qualified type.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
CodeGenTypes & getTypes()
llvm::Type * ConvertTypeForMem(QualType T)
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
CanQualType getReturnType() const
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
bool isBitField() const
Determines whether this field is a bitfield.
bool isMemberPointerType() const
unsigned getInAllocaFieldIndex() const
llvm::Module & getModule() const
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
Extend - Valid only for integer argument types.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
Address getAddress() const
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CodeGen::CGCXXABI & getCXXABI() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
CGCXXABI & getCXXABI() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
bool hasFlexibleArrayMember() const
bool isEnumeralType() const
ASTContext & getContext() const
const llvm::DataLayout & getDataLayout() const
The base class of the type hierarchy.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
bool isBlockPointerType() const
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
VarDecl - An instance of this class is created to represent a variable declaration or definition...
llvm::Type * getElementType() const
Return the type of the values stored in this address.
CallingConv getCallConv() const
field_iterator field_begin() const
void setCoerceToType(llvm::Type *T)
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
virtual bool shouldSignExtUnsignedType(QualType Ty) const
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
llvm::LLVMContext & getVMContext() const
RecordDecl - Represents a struct/union/class.
const_arg_iterator arg_end() const
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
bool isScalarType() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
bool isReferenceType() const
bool isStructureOrClassType() const
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
static ABIArgInfo getExtendInReg(llvm::Type *T=nullptr)
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
ABIArgInfo classifyReturnType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to return a particular type.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
llvm::Type * getCoerceToType() const
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
QualType getReturnType() const
field_range fields() const
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
RecordDecl * getDecl() const
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
CharUnits getPointerSize() const
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
virtual StringRef getABI() const
Get the ABI currently in use.
detail::InMemoryDirectory::const_iterator I
llvm::StructType * getCoerceAndExpandType() const
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
std::string FloatABI
The ABI to use for passing floating point arguments.
field_iterator field_end() const
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
EnumDecl * getDecl() const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
unsigned getNumRequiredArgs() const
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
static CharUnits One()
One - Construct a CharUnits quantity of one.
const llvm::DataLayout & getDataLayout() const
Represents a prototype with parameter type info, e.g.
bool isFloatingPoint() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
const TargetInfo & getTarget() const
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
bool isRealFloatingType() const
Floating point categories.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Exposes information about the current target.
llvm::Value * getPointer() const
StringRef getName() const
Return the actual identifier string.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isAnyComplexType() const
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
CharUnits getIndirectAlign() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
ASTContext & getContext() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isFloatingType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
llvm::LLVMContext & getLLVMContext()
llvm::IntegerType * Int32Ty
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
unsigned Map[FirstTargetAddressSpace]
The type of a lookup table which maps from language-specific address spaces to target-specific ones...
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
Represents a GCC generic vector type.
Implements C++ ABI-specific semantic analysis functions.
unsigned getRegParm() const
llvm::Type * getPaddingType() const
RecordDecl * getDefinition() const
getDefinition - Returns the RecordDecl that actually defines this struct/union/class.
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
The l-value was considered opaque, so the alignment was determined from a type.
Pass it as a pointer to temporary memory.
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff a the field is "empty", that is it is an unnamed bit-field or an (arra...
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
ASTContext & getContext() const
CharUnits getPointerAlign() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
bool isBuiltinType() const
Helper methods to distinguish type categories.
const llvm::DataLayout & getDataLayout() const
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
CoerceAndExpand - Only valid for aggregate argument types.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
const CodeGenOptions & getCodeGenOpts() const
const LangOptions & getLangOpts() const
llvm::LLVMContext & getLLVMContext()
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isVectorType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
bool isMemberFunctionPointerType() const
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
QualType getPointeeType() const
bool isSRetAfterThis() const
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T-> getSizeExpr()))
CGFunctionInfo - Class to encapsulate the information about a function definition.
CharUnits getAlignment() const
Return the alignment of this pointer.
This class organizes the cross-function state that is used while generating LLVM code.
bool canHaveCoerceToType() const
const CodeGenOptions & getCodeGenOpts() const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
A refining implementation of ABIInfo for targets that support swiftcall.
bool isZero() const
isZero - Test whether the quantity equals zero.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
unsigned getDirectOffset() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
EnumDecl - Represents an enum.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
const RecordType * getAsStructureType() const
llvm::PointerType * getType() const
Return the type of the pointer value.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Complex values, per C99 6.2.5p11.
const T * getAs() const
Member-template getAs<specific type>.
virtual llvm::Optional< unsigned > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory.
QualType getCanonicalType() const
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and passing some or all of it in registers.
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LanguageLinkage getLanguageLinkage() const
Compute the language linkage.
Implements C++ ABI-specific code generation functions.
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::PointerType * Int8PtrTy
CodeGenOptions - Track various options which control how the code is optimized and passed to the backend.
const TargetInfo & getTarget() const
Expand - Only valid for aggregate argument types.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
Pass it on the stack using its defined layout.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, unsigned SrcAddr, unsigned DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
llvm::Type * ConvertType(QualType T)
bool getHasRegParm() const
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo=LValueBaseInfo(AlignmentSource::Type))
ArraySizeModifier getSizeModifier() const
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
This class is used for builtin types like 'int'.
const TargetInfo & getTarget() const
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted in an array of slots on the stack.
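A minimal sketch of how a target's EmitVAArg can delegate to this helper; the 4-byte slot size is an assumption for illustration, not every target's value:

Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                  QualType Ty) const override {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                          getContext().getTypeInfoInChars(Ty),
                          /*SlotSizeAndAlign=*/CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true);
}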
const_arg_iterator arg_begin() const
bool getIndirectByVal() const
ABIInfo - Target-specific hooks for defining how a type should be passed or returned from functions.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
unsigned getTargetAddressSpace(QualType T) const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units.
const llvm::Triple & getTriple() const
QualType getElementType() const
virtual unsigned getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get the target-favored AST address space of a global variable for languages other than OpenCL and CUDA.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
bool getIndirectRealign() const
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
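For example, assuming an ASTContext &Ctx is in scope:

int64_t Bits = Ctx.toBits(clang::CharUnits::fromQuantity(4)); // 32 when a char is 8 bits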
LValue - This represents an lvalue reference.
ASTContext & getContext() const
void setInAllocaSRet(bool SRet)
EnumDecl * getDefinition() const
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
bool isConstQualified() const
Determine whether this type is const-qualified.
RecordArgABI
Specify how one should pass an argument of a record type.
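The classify functions in this file consult the C++ ABI before applying their own record rules. A sketch of that recurring dispatch; the surrounding function is hypothetical, while the calls match the declarations listed on this page:

if (const RecordType *RT = Ty->getAs<RecordType>())
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
    // Non-default: RAA_DirectInMemory means the stack with its defined
    // layout (ByVal); RAA_Indirect means passing a pointer to a copy.
    return getNaturalAlignIndirect(
        Ty, /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);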
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
const CodeGenOptions & getCodeGenOpts() const
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", bool CastToDefaultAddrSpace=true)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
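A sketch of constructing one, using this file's isAggregateTypeForABI helper, for a hypothetical target that passes every aggregate in memory at 8-byte alignment; the alignment value and the bare-bones classify function are illustrative:

ABIArgInfo classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    // Passed indirectly: a byval stack copy at 8-byte alignment.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(8),
                                   /*ByVal=*/true, /*Realign=*/false);
  return ABIArgInfo::getDirect();
}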
Attr - This represents one attribute.
bool supportsCOMDAT() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
bool isPointerType() const