24 #include "llvm/ADT/StringExtras.h" 25 #include "llvm/ADT/StringSwitch.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/ADT/Twine.h" 28 #include "llvm/IR/DataLayout.h" 29 #include "llvm/IR/Type.h" 30 #include "llvm/Support/raw_ostream.h" 33 using namespace clang;
34 using namespace CodeGen;
52 llvm::LLVMContext &LLVMContext) {
56 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
57 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
67 for (
unsigned I = FirstIndex; I <= LastIndex; ++I) {
69 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
83 ByRef, Realign, Padding);
114 unsigned maxAllRegisters) {
115 unsigned intCount = 0, fpCount = 0;
117 if (
type->isPointerTy()) {
119 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
121 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
123 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
128 return (intCount + fpCount > maxAllRegisters);
133 unsigned numElts)
const {
163 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
164 !RT->getDecl()->canPassInRegisters()) {
177 if (UD->
hasAttr<TransparentUnionAttr>()) {
178 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
216 uint64_t Members)
const {
221 raw_ostream &OS = llvm::errs();
222 OS <<
"(ABIArgInfo Kind=";
225 OS <<
"Direct Type=";
238 OS <<
"InAlloca Offset=" << getInAllocaFieldIndex();
241 OS <<
"Indirect Align=" << getIndirectAlign().getQuantity()
242 <<
" ByVal=" << getIndirectByVal()
243 <<
" Realign=" << getIndirectRealign();
248 case CoerceAndExpand:
249 OS <<
"CoerceAndExpand Type=";
250 getCoerceAndExpandType()->print(OS);
263 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
265 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
267 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
269 Ptr->getName() +
".aligned");
293 bool AllowHigherAlign) {
303 if (AllowHigherAlign && DirectAlign > SlotSize) {
319 !DirectTy->isStructTy()) {
342 std::pair<CharUnits, CharUnits> ValueInfo,
344 bool AllowHigherAlign) {
351 DirectSize = ValueInfo.first;
352 DirectAlign = ValueInfo.second;
358 DirectTy = DirectTy->getPointerTo(0);
361 DirectSize, DirectAlign,
374 Address Addr1, llvm::BasicBlock *Block1,
375 Address Addr2, llvm::BasicBlock *Block2,
376 const llvm::Twine &Name =
"") {
378 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(Addr1.
getType(), 2, Name);
429 return llvm::CallingConv::SPIR_KERNEL;
433 llvm::PointerType *T,
QualType QT)
const {
434 return llvm::ConstantPointerNull::get(T);
441 "Address space agnostic languages only");
450 if (
auto *C = dyn_cast<llvm::Constant>(Src))
451 return performAddrSpaceCast(CGF.
CGM, C, SrcAddr, DestAddr, DestTy);
454 Src, DestTy, Src->hasName() ? Src->getName() +
".ascast" :
"");
463 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
469 llvm::AtomicOrdering Ordering,
470 llvm::LLVMContext &Ctx)
const {
471 return Ctx.getOrInsertSyncScopeID(
"");
489 if (AT->getSize() == 0)
491 FT = AT->getElementType();
502 if (isa<CXXRecordDecl>(RT->
getDecl()))
520 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
521 for (
const auto &I : CXXRD->bases())
525 for (
const auto *I : RD->
fields())
548 const Type *Found =
nullptr;
551 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
552 for (
const auto &I : CXXRD->bases()) {
570 for (
const auto *FD : RD->
fields()) {
584 if (AT->getSize().getZExtValue() != 1)
586 FT = AT->getElementType();
622 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
625 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
634 return Address(Addr, TyAlignForABI);
637 "Unexpected ArgInfo Kind in generic VAArg emitter!");
640 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
642 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
644 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
646 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
659 class DefaultABIInfo :
public ABIInfo {
670 I.info = classifyArgumentType(I.type);
699 Ty = EnumTy->getDecl()->getIntegerType();
714 RetTy = EnumTy->getDecl()->getIntegerType();
727 DefaultABIInfo defaultInfo;
744 Arg.info = classifyArgumentType(Arg.type);
751 bool asReturnValue)
const override {
755 bool isSwiftErrorInRegister()
const override {
765 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
768 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
769 if (
const auto *
Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
770 llvm::Function *Fn = cast<llvm::Function>(GV);
772 B.addAttribute(
"wasm-import-module",
Attr->getImportModule());
773 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
775 if (
const auto *
Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
776 llvm::Function *Fn = cast<llvm::Function>(GV);
778 B.addAttribute(
"wasm-import-name",
Attr->getImportName());
779 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
783 if (
auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
784 llvm::Function *Fn = cast<llvm::Function>(GV);
785 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
786 Fn->addFnAttr(
"no-prototype");
811 return defaultInfo.classifyArgumentType(Ty);
831 return defaultInfo.classifyReturnType(RetTy);
849 class PNaClABIInfo :
public ABIInfo {
894 Ty = EnumTy->getDecl()->getIntegerType();
914 RetTy = EnumTy->getDecl()->getIntegerType();
923 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
924 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
925 IRType->getScalarSizeInBits() != 64;
929 StringRef Constraint,
931 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
932 .Cases(
"y",
"&y",
"^Ym",
true)
934 if (IsMMXCons && Ty->isVectorTy()) {
935 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
951 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
952 if (BT->getKind() == BuiltinType::LongDouble) {
954 &llvm::APFloat::x87DoubleExtended())
963 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
/// Under the x86 vectorcall convention an HVA-style aggregate may only be
/// passed in registers when it has at most four members.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  const uint64_t MaxVectorCallMembers = 4;
  return NumMembers <= MaxVectorCallMembers;
}
// Per-call classification state: records the calling convention plus the
// remaining free integer (FreeRegs) and SSE (FreeSSERegs) register counts.
// Both counters start at zero; the CC-specific budgets are assigned later
// by the caller (e.g. FreeSSERegs = 6 for vectorcall, 8 for regcall).
989 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
993 unsigned FreeSSERegs;
998 VectorcallMaxParamNumAsReg = 6
1008 static const unsigned MinABIStackAlignInBytes = 4;
1010 bool IsDarwinVectorABI;
1011 bool IsRetSmallStructInRegABI;
1012 bool IsWin32StructABI;
1013 bool IsSoftFloatABI;
1015 unsigned DefaultNumRegisterParameters;
/// Whether Size (a width in bits) matches one of the x86 integer register
/// widths eligible for in-register return: 8, 16, 32, or 64.
static bool isRegisterSize(unsigned Size) {
  switch (Size) {
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}
1023 return isX86VectorTypeForVectorCall(
getContext(), Ty);
1027 uint64_t NumMembers)
const override {
1029 return isX86VectorCallAggregateSmallEnough(NumMembers);
1041 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
1049 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
1051 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
1052 bool &NeedsPadding)
const;
1053 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
1055 bool canExpandIndirectArgument(
QualType Ty)
const;
1065 bool &UsedInAlloca)
const;
1074 bool RetSmallStructInRegABI,
bool Win32StructABI,
1075 unsigned NumRegisterParameters,
bool SoftFloatABI)
1076 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1077 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1078 IsWin32StructABI(Win32StructABI),
1079 IsSoftFloatABI(SoftFloatABI),
1081 DefaultNumRegisterParameters(NumRegisterParameters) {}
1084 bool asReturnValue)
const override {
1092 bool isSwiftErrorInRegister()
const override {
1101 bool RetSmallStructInRegABI,
bool Win32StructABI,
1102 unsigned NumRegisterParameters,
bool SoftFloatABI)
1104 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1105 NumRegisterParameters, SoftFloatABI)) {}
1107 static bool isStructReturnInRegABI(
1110 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1123 StringRef Constraint,
1125 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1129 std::string &Constraints,
1130 std::vector<llvm::Type *> &ResultRegTypes,
1131 std::vector<llvm::Type *> &ResultTruncRegTypes,
1132 std::vector<LValue> &ResultRegDests,
1133 std::string &AsmString,
1134 unsigned NumOutputs)
const override;
1138 unsigned Sig = (0xeb << 0) |
1142 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1145 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
1146 return "movl\t%ebp, %ebp" 1147 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1162 unsigned NumNewOuts,
1163 std::string &AsmString) {
1165 llvm::raw_string_ostream OS(Buf);
1167 while (Pos < AsmString.size()) {
1168 size_t DollarStart = AsmString.find(
'$', Pos);
1169 if (DollarStart == std::string::npos)
1170 DollarStart = AsmString.size();
1171 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1172 if (DollarEnd == std::string::npos)
1173 DollarEnd = AsmString.size();
1174 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1176 size_t NumDollars = DollarEnd - DollarStart;
1177 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1179 size_t DigitStart = Pos;
1180 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1181 if (DigitEnd == std::string::npos)
1182 DigitEnd = AsmString.size();
1183 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1184 unsigned OperandIndex;
1185 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1186 if (OperandIndex >= FirstIn)
1187 OperandIndex += NumNewOuts;
1195 AsmString = std::move(OS.str());
1199 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1201 std::vector<llvm::Type *> &ResultRegTypes,
1202 std::vector<llvm::Type *> &ResultTruncRegTypes,
1203 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1204 unsigned NumOutputs)
const {
1209 if (!Constraints.empty())
1211 if (RetWidth <= 32) {
1212 Constraints +=
"={eax}";
1213 ResultRegTypes.push_back(CGF.
Int32Ty);
1216 Constraints +=
"=A";
1217 ResultRegTypes.push_back(CGF.
Int64Ty);
1222 ResultTruncRegTypes.push_back(CoerceTy);
1226 CoerceTy->getPointerTo()));
1227 ResultRegDests.push_back(ReturnSlot);
1234 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1240 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1246 if (Size == 64 || Size == 128)
1261 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1265 if (!RT)
return false;
1277 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1286 Ty = CTy->getElementType();
1296 return Size == 32 || Size == 64;
1301 for (
const auto *FD : RD->
fields()) {
1311 if (FD->isBitField())
1336 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1343 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1344 if (!IsWin32StructABI) {
1347 if (!CXXRD->isCLike())
1351 if (CXXRD->isDynamicClass())
1368 if (State.FreeRegs) {
1377 CCState &State)
const {
1382 uint64_t NumElts = 0;
1383 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1384 State.CC == llvm::CallingConv::X86_RegCall) &&
1392 if (IsDarwinVectorABI) {
1404 if ((Size == 8 || Size == 16 || Size == 32) ||
1405 (Size == 64 && VT->getNumElements() == 1))
1409 return getIndirectReturnResult(RetTy, State);
1418 if (RT->getDecl()->hasFlexibleArrayMember())
1419 return getIndirectReturnResult(RetTy, State);
1424 return getIndirectReturnResult(RetTy, State);
1432 if (shouldReturnTypeInRegister(RetTy,
getContext())) {
1441 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1442 || SeltTy->hasPointerRepresentation())
1450 return getIndirectReturnResult(RetTy, State);
1455 RetTy = EnumTy->getDecl()->getIntegerType();
1472 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1473 for (
const auto &I : CXXRD->bases())
1477 for (
const auto *
i : RD->
fields()) {
1490 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1491 unsigned Align)
const {
1494 if (Align <= MinABIStackAlignInBytes)
1498 if (!IsDarwinVectorABI) {
1500 return MinABIStackAlignInBytes;
1508 return MinABIStackAlignInBytes;
1512 CCState &State)
const {
1514 if (State.FreeRegs) {
1524 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1525 if (StackAlign == 0)
1530 bool Realign = TypeAlign > StackAlign;
1535 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1542 if (K == BuiltinType::Float || K == BuiltinType::Double)
1548 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1549 if (!IsSoftFloatABI) {
1550 Class C = classify(Ty);
1556 unsigned SizeInRegs = (Size + 31) / 32;
1558 if (SizeInRegs == 0)
1562 if (SizeInRegs > State.FreeRegs) {
1571 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1575 State.FreeRegs -= SizeInRegs;
1579 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1581 bool &NeedsPadding)
const {
1588 NeedsPadding =
false;
1591 if (!updateFreeRegs(Ty, State))
1597 if (State.CC == llvm::CallingConv::X86_FastCall ||
1598 State.CC == llvm::CallingConv::X86_VectorCall ||
1599 State.CC == llvm::CallingConv::X86_RegCall) {
1600 if (
getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1601 NeedsPadding =
true;
1609 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1610 if (!updateFreeRegs(Ty, State))
1616 if (State.CC == llvm::CallingConv::X86_FastCall ||
1617 State.CC == llvm::CallingConv::X86_VectorCall ||
1618 State.CC == llvm::CallingConv::X86_RegCall) {
1630 CCState &State)
const {
1640 return getIndirectResult(Ty,
false, State);
1650 uint64_t NumElts = 0;
1651 if (State.CC == llvm::CallingConv::X86_RegCall &&
1654 if (State.FreeSSERegs >= NumElts) {
1655 State.FreeSSERegs -= NumElts;
1660 return getIndirectResult(Ty,
false, State);
1667 return getIndirectResult(Ty,
true, State);
1674 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1675 bool NeedsPadding =
false;
1677 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1680 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1686 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1694 if (
getContext().getTypeSize(Ty) <= 4 * 32 &&
1695 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1697 State.CC == llvm::CallingConv::X86_FastCall ||
1698 State.CC == llvm::CallingConv::X86_VectorCall ||
1699 State.CC == llvm::CallingConv::X86_RegCall,
1702 return getIndirectResult(Ty,
true, State);
1708 if (IsDarwinVectorABI) {
1710 if ((Size == 8 || Size == 16 || Size == 32) ||
1711 (Size == 64 && VT->getNumElements() == 1))
1724 Ty = EnumTy->getDecl()->getIntegerType();
1726 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1739 void X86_32ABIInfo::computeVectorCallArgs(
CGFunctionInfo &FI, CCState &State,
1740 bool &UsedInAlloca)
const {
1754 uint64_t NumElts = 0;
1758 if (State.FreeSSERegs >= NumElts) {
1759 State.FreeSSERegs -= NumElts;
1771 uint64_t NumElts = 0;
1777 if (State.FreeSSERegs >= NumElts) {
1778 State.FreeSSERegs -= NumElts;
1779 I.info = getDirectX86Hva();
1781 I.info = getIndirectResult(Ty,
false, State);
1783 }
else if (!IsHva) {
1795 else if (State.CC == llvm::CallingConv::X86_FastCall)
1797 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1799 State.FreeSSERegs = 6;
1802 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1804 State.FreeSSERegs = 8;
1806 State.FreeRegs = DefaultNumRegisterParameters;
1813 if (State.FreeRegs) {
1824 bool UsedInAlloca =
false;
1825 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1826 computeVectorCallArgs(FI, State, UsedInAlloca);
1838 rewriteWithInAlloca(FI);
1848 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1855 StackOffset = FieldEnd.
alignTo(FieldAlign);
1856 if (StackOffset != FieldEnd) {
1857 CharUnits NumBytes = StackOffset - FieldEnd;
1859 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1860 FrameFields.push_back(Ty);
1885 llvm_unreachable(
"invalid enum");
1888 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1889 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1906 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1913 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1923 for (; I != E; ++I) {
1925 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1943 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1950 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1952 assert(Triple.getArch() == llvm::Triple::x86);
1954 switch (Opts.getStructReturnConvention()) {
1963 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1966 switch (Triple.getOS()) {
1967 case llvm::Triple::DragonFly:
1968 case llvm::Triple::FreeBSD:
1969 case llvm::Triple::OpenBSD:
1970 case llvm::Triple::Win32:
1977 void X86_32TargetCodeGenInfo::setTargetAttributes(
1979 if (GV->isDeclaration())
1981 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1982 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1983 llvm::Function *Fn = cast<llvm::Function>(GV);
1984 Fn->addFnAttr(
"stackrealign");
1986 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1987 llvm::Function *Fn = cast<llvm::Function>(GV);
1988 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1993 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2016 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
2043 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
2045 case X86AVXABILevel::AVX512:
2047 case X86AVXABILevel::AVX:
2052 llvm_unreachable(
"Unknown AVXLevel");
2077 static Class merge(Class Accum, Class Field);
2093 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
2119 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2120 bool isNamedArg)
const;
2124 unsigned IROffset,
QualType SourceTy,
2125 unsigned SourceOffset)
const;
2127 unsigned IROffset,
QualType SourceTy,
2128 unsigned SourceOffset)
const;
2144 unsigned &neededInt,
unsigned &neededSSE,
2145 bool isNamedArg)
const;
2148 unsigned &NeededSSE)
const;
2151 unsigned &NeededSSE)
const;
2153 bool IsIllegalVectorType(
QualType Ty)
const;
2160 bool honorsRevision0_98()
const {
2166 bool classifyIntegerMMXAsSSE()
const {
2168 if (
getContext().getLangOpts().getClangABICompat() <=
2173 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2175 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2183 bool Has64BitPointers;
2188 Has64BitPointers(CGT.
getDataLayout().getPointerSize(0) == 8) {
2192 unsigned neededInt, neededSSE;
2198 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2199 return (vectorTy->getBitWidth() > 128);
2211 bool has64BitPointers()
const {
2212 return Has64BitPointers;
2216 bool asReturnValue)
const override {
2219 bool isSwiftErrorInRegister()
const override {
2229 IsMingw64(
getTarget().getTriple().isWindowsGNUEnvironment()) {}
2238 return isX86VectorTypeForVectorCall(
getContext(), Ty);
2242 uint64_t NumMembers)
const override {
2244 return isX86VectorCallAggregateSmallEnough(NumMembers);
2248 bool asReturnValue)
const override {
2252 bool isSwiftErrorInRegister()
const override {
2258 bool IsVectorCall,
bool IsRegCall)
const;
2261 void computeVectorCallArgs(
CGFunctionInfo &FI,
unsigned FreeSSERegs,
2262 bool IsVectorCall,
bool IsRegCall)
const;
2274 const X86_64ABIInfo &getABIInfo()
const {
2280 bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()
const override {
2299 StringRef Constraint,
2301 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2304 bool isNoProtoCallVariadic(
const CallArgList &args,
2313 bool HasAVXType =
false;
2314 for (CallArgList::const_iterator
2315 it = args.begin(), ie = args.end(); it != ie; ++it) {
2316 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2331 unsigned Sig = (0xeb << 0) |
2335 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2338 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2340 if (GV->isDeclaration())
2342 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2343 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2344 llvm::Function *Fn = cast<llvm::Function>(GV);
2345 Fn->addFnAttr(
"stackrealign");
2347 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2348 llvm::Function *Fn = cast<llvm::Function>(GV);
2349 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2355 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2359 bool Quote = (Lib.find(
" ") != StringRef::npos);
2360 std::string ArgStr = Quote ?
"\"" :
"";
2362 if (!Lib.endswith_lower(
".lib") && !Lib.endswith_lower(
".a"))
2364 ArgStr += Quote ?
"\"" :
"";
2368 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2371 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2372 unsigned NumRegisterParameters)
2373 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2374 Win32StructABI, NumRegisterParameters,
false) {}
2376 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2379 void getDependentLibraryOption(llvm::StringRef Lib,
2381 Opt =
"/DEFAULTLIB:";
2382 Opt += qualifyWindowsLibrary(Lib);
2385 void getDetectMismatchOption(llvm::StringRef Name,
2386 llvm::StringRef
Value,
2388 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2392 static void addStackProbeTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2394 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2397 Fn->addFnAttr(
"stack-probe-size",
2400 Fn->addFnAttr(
"no-stack-arg-probe");
2404 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2406 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2407 if (GV->isDeclaration())
2409 addStackProbeTargetAttributes(D, GV, CGM);
2418 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2435 void getDependentLibraryOption(llvm::StringRef Lib,
2437 Opt =
"/DEFAULTLIB:";
2438 Opt += qualifyWindowsLibrary(Lib);
2441 void getDetectMismatchOption(llvm::StringRef Name,
2442 llvm::StringRef
Value,
2444 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2448 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2451 if (GV->isDeclaration())
2453 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2454 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2455 llvm::Function *Fn = cast<llvm::Function>(GV);
2456 Fn->addFnAttr(
"stackrealign");
2458 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2459 llvm::Function *Fn = cast<llvm::Function>(GV);
2460 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2464 addStackProbeTargetAttributes(D, GV, CGM);
2468 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2493 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2495 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2497 if (Hi == SSEUp && Lo != SSE)
2501 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2525 assert((Accum != Memory && Accum != ComplexX87) &&
2526 "Invalid accumulated classification during merge.");
2527 if (Accum == Field || Field == NoClass)
2529 if (Field == Memory)
2531 if (Accum == NoClass)
2535 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2536 Accum == X87 || Accum == X87Up)
2541 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2542 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2553 Class &Current = OffsetBase < 64 ? Lo : Hi;
2559 if (k == BuiltinType::Void) {
2561 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2564 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2566 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2568 }
else if (k == BuiltinType::LongDouble) {
2570 if (LDF == &llvm::APFloat::IEEEquad()) {
2573 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2576 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
2579 llvm_unreachable(
"unexpected long double representation!");
2588 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2599 if (Has64BitPointers) {
2606 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2607 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2608 if (EB_FuncPtr != EB_ThisAdj) {
2622 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2631 uint64_t EB_Lo = (OffsetBase) / 64;
2632 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2635 }
else if (Size == 64) {
2636 QualType ElementType = VT->getElementType();
2645 if (!classifyIntegerMMXAsSSE() &&
2656 if (OffsetBase && OffsetBase != 64)
2658 }
else if (Size == 128 ||
2659 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2687 else if (Size <= 128)
2695 if (LDF == &llvm::APFloat::IEEEquad())
2697 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2698 Current = ComplexX87;
2699 else if (LDF == &llvm::APFloat::IEEEdouble())
2702 llvm_unreachable(
"unexpected long double representation!");
2707 uint64_t EB_Real = (OffsetBase) / 64;
2709 if (Hi == NoClass && EB_Real != EB_Imag)
2729 if (OffsetBase %
getContext().getTypeAlign(AT->getElementType()))
2736 uint64_t ArraySize = AT->getSize().getZExtValue();
2743 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2746 for (uint64_t
i=0,
Offset=OffsetBase;
i<ArraySize; ++
i,
Offset += EltSize) {
2747 Class FieldLo, FieldHi;
2748 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2749 Lo = merge(Lo, FieldLo);
2750 Hi = merge(Hi, FieldHi);
2751 if (Lo == Memory || Hi == Memory)
2755 postMerge(Size, Lo, Hi);
2756 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2786 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2787 for (
const auto &I : CXXRD->bases()) {
2788 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2789 "Unexpected base class!");
2791 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2798 Class FieldLo, FieldHi;
2801 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2802 Lo = merge(Lo, FieldLo);
2803 Hi = merge(Hi, FieldHi);
2804 if (Lo == Memory || Hi == Memory) {
2805 postMerge(Size, Lo, Hi);
2814 i != e; ++
i, ++idx) {
2816 bool BitField =
i->isBitField();
2819 if (BitField &&
i->isUnnamedBitfield())
2829 if (Size > 128 && (Size !=
getContext().getTypeSize(
i->getType()) ||
2830 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2832 postMerge(Size, Lo, Hi);
2836 if (!BitField && Offset %
getContext().getTypeAlign(
i->getType())) {
2838 postMerge(Size, Lo, Hi);
2848 Class FieldLo, FieldHi;
2854 assert(!
i->isUnnamedBitfield());
2856 uint64_t Size =
i->getBitWidthValue(
getContext());
2858 uint64_t EB_Lo = Offset / 64;
2859 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2862 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2867 FieldHi = EB_Hi ?
Integer : NoClass;
2870 classify(
i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2871 Lo = merge(Lo, FieldLo);
2872 Hi = merge(Hi, FieldHi);
2873 if (Lo == Memory || Hi == Memory)
2877 postMerge(Size, Lo, Hi);
2887 Ty = EnumTy->getDecl()->getIntegerType();
2896 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2899 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2900 if (Size <= 64 || Size > LargestVector)
2908 unsigned freeIntRegs)
const {
2920 Ty = EnumTy->getDecl()->getIntegerType();
2954 if (freeIntRegs == 0) {
2959 if (Align == 8 && Size <= 64)
2976 if (isa<llvm::VectorType>(IRType) ||
2977 IRType->getTypeID() == llvm::Type::FP128TyID)
2982 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2985 return llvm::VectorType::get(llvm::Type::getDoubleTy(
getVMContext()),
3001 unsigned TySize = (unsigned)Context.
getTypeSize(Ty);
3002 if (TySize <= StartBit)
3006 unsigned EltSize = (unsigned)Context.
getTypeSize(AT->getElementType());
3007 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3010 for (
unsigned i = 0;
i != NumElts; ++
i) {
3012 unsigned EltOffset =
i*EltSize;
3013 if (EltOffset >= EndBit)
break;
3015 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3017 EndBit-EltOffset, Context))
3029 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3030 for (
const auto &I : CXXRD->bases()) {
3031 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3032 "Unexpected base class!");
3034 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
3038 if (BaseOffset >= EndBit)
continue;
3040 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3042 EndBit-BaseOffset, Context))
3053 i != e; ++
i, ++idx) {
3057 if (FieldOffset >= EndBit)
break;
3059 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3078 const llvm::DataLayout &TD) {
3080 if (IROffset == 0 && IRType->isFloatTy())
3084 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3085 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3086 unsigned Elt = SL->getElementContainingOffset(IROffset);
3087 IROffset -= SL->getElementOffset(Elt);
3092 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3094 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3095 IROffset -= IROffset/EltSize*EltSize;
3106 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3107 QualType SourceTy,
unsigned SourceOffset)
const {
3120 return llvm::VectorType::get(llvm::Type::getFloatTy(
getVMContext()), 2);
3141 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3142 QualType SourceTy,
unsigned SourceOffset)
const {
3145 if (IROffset == 0) {
3147 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3148 IRType->isIntegerTy(64))
3157 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3158 IRType->isIntegerTy(32) ||
3159 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3160 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3161 cast<llvm::IntegerType>(IRType)->getBitWidth();
3169 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3171 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(STy);
3172 if (IROffset < SL->getSizeInBytes()) {
3173 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3174 IROffset -= SL->getElementOffset(FieldIdx);
3176 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3177 SourceTy, SourceOffset);
3181 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3184 unsigned EltOffset = IROffset/EltSize*EltSize;
3185 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3191 unsigned TySizeInBytes =
3194 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
3199 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3210 const llvm::DataLayout &TD) {
3215 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3216 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3217 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3218 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
3230 if (Lo->isFloatTy())
3231 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3233 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3234 &&
"Invalid/unknown lo type");
3235 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3239 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3242 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3243 "Invalid x86-64 argument pair!");
3251 X86_64ABIInfo::Class Lo, Hi;
3252 classify(RetTy, 0, Lo, Hi,
true);
3255 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3256 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3265 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3266 "Unknown missing lo part");
3271 llvm_unreachable(
"Invalid classification for lo word.");
3276 return getIndirectReturnResult(RetTy);
3281 ResType = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3285 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3288 RetTy = EnumTy->getDecl()->getIntegerType();
3299 ResType = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3312 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3313 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(
getVMContext()),
3324 llvm_unreachable(
"Invalid classification for hi word.");
3331 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3336 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3347 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3348 ResType = GetByteVectorType(RetTy);
3359 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3376 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3382 X86_64ABIInfo::Class Lo, Hi;
3383 classify(Ty, 0, Lo, Hi, isNamedArg);
3387 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3388 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3399 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3400 "Unknown missing lo part");
3413 return getIndirectResult(Ty, freeIntRegs);
3417 llvm_unreachable(
"Invalid classification for lo word.");
3430 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3433 Ty = EnumTy->getDecl()->getIntegerType();
3447 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3461 llvm_unreachable(
"Invalid classification for hi word.");
3463 case NoClass:
break;
3468 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(Ty), 8, Ty, 8);
3490 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3491 ResType = GetByteVectorType(Ty);
3505 X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
3506 unsigned &NeededSSE)
const {
3508 assert(RT &&
"classifyRegCallStructType only valid with struct types");
3510 if (RT->getDecl()->hasFlexibleArrayMember())
3511 return getIndirectReturnResult(Ty);
3514 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3515 if (CXXRD->isDynamicClass()) {
3516 NeededInt = NeededSSE = 0;
3517 return getIndirectReturnResult(Ty);
3520 for (
const auto &I : CXXRD->bases())
3521 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3523 NeededInt = NeededSSE = 0;
3524 return getIndirectReturnResult(Ty);
3529 for (
const auto *FD : RT->getDecl()->fields()) {
3530 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3531 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3533 NeededInt = NeededSSE = 0;
3534 return getIndirectReturnResult(Ty);
3537 unsigned LocalNeededInt, LocalNeededSSE;
3539 LocalNeededSSE,
true)
3541 NeededInt = NeededSSE = 0;
3542 return getIndirectReturnResult(Ty);
3544 NeededInt += LocalNeededInt;
3545 NeededSSE += LocalNeededSSE;
3553 unsigned &NeededInt,
3554 unsigned &NeededSSE)
const {
3559 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3568 if (CallingConv == llvm::CallingConv::Win64) {
3569 WinX86_64ABIInfo Win64ABIInfo(
CGT, AVXLevel);
3570 Win64ABIInfo.computeInfo(FI);
3574 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3577 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3578 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3579 unsigned NeededInt, NeededSSE;
3585 classifyRegCallStructType(FI.
getReturnType(), NeededInt, NeededSSE);
3586 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3587 FreeIntRegs -= NeededInt;
3588 FreeSSERegs -= NeededSSE;
3617 it != ie; ++it, ++ArgNo) {
3618 bool IsNamedArg = ArgNo < NumRequiredArgs;
3620 if (IsRegCall && it->type->isStructureOrClassType())
3621 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3624 NeededSSE, IsNamedArg);
3630 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3631 FreeIntRegs -= NeededInt;
3632 FreeSSERegs -= NeededSSE;
3634 it->info = getIndirectResult(it->type, FreeIntRegs);
3660 llvm::PointerType::getUnqual(LTy));
3669 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3670 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3671 "overflow_arg_area.next");
3687 unsigned neededInt, neededSSE;
3695 if (!neededInt && !neededSSE)
3711 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3715 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3716 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3723 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3724 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3725 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3731 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3752 if (neededInt && neededSSE) {
3754 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3758 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3761 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3762 "Unexpected ABI info for mixed regs");
3763 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3764 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3767 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3768 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3784 }
else if (neededInt) {
3790 std::pair<CharUnits, CharUnits> SizeAlign =
3792 uint64_t TySize = SizeAlign.first.getQuantity();
3803 }
else if (neededSSE == 1) {
3808 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3827 RegAddrLo, ST->getStructElementType(0)));
3830 RegAddrHi, ST->getStructElementType(1)));
3873 WinX86_64ABIInfo::reclassifyHvaArgType(
QualType Ty,
unsigned &FreeSSERegs,
3876 const Type *
Base =
nullptr;
3877 uint64_t NumElts = 0;
3881 FreeSSERegs -= NumElts;
3882 return getDirectX86Hva();
3888 bool IsReturnType,
bool IsVectorCall,
3889 bool IsRegCall)
const {
3895 Ty = EnumTy->getDecl()->getIntegerType();
3898 uint64_t Width = Info.
Width;
3903 if (!IsReturnType) {
3913 const Type *
Base =
nullptr;
3914 uint64_t NumElts = 0;
3917 if ((IsVectorCall || IsRegCall) &&
3920 if (FreeSSERegs >= NumElts) {
3921 FreeSSERegs -= NumElts;
3927 }
else if (IsVectorCall) {
3928 if (FreeSSERegs >= NumElts &&
3930 FreeSSERegs -= NumElts;
3932 }
else if (IsReturnType) {
3945 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3952 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3960 switch (BT->getKind()) {
3961 case BuiltinType::Bool:
3966 case BuiltinType::LongDouble:
3971 if (LDF == &llvm::APFloat::x87DoubleExtended())
3976 case BuiltinType::Int128:
3977 case BuiltinType::UInt128:
3987 llvm::VectorType::get(llvm::Type::getInt64Ty(
getVMContext()), 2));
3998 unsigned FreeSSERegs,
4000 bool IsRegCall)
const {
4005 if (Count < VectorcallMaxParamNumAsReg)
4006 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4010 unsigned ZeroSSERegsAvail = 0;
4011 I.info = classify(I.type, ZeroSSERegsAvail,
false,
4012 IsVectorCall, IsRegCall);
4018 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4024 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4025 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4029 if (CC == llvm::CallingConv::X86_64_SysV) {
4030 X86_64ABIInfo SysVABIInfo(
CGT, AVXLevel);
4031 SysVABIInfo.computeInfo(FI);
4035 unsigned FreeSSERegs = 0;
4039 }
else if (IsRegCall) {
4046 IsVectorCall, IsRegCall);
4051 }
else if (IsRegCall) {
4057 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4060 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4068 bool IsIndirect =
false;
4074 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4086 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4087 bool IsSoftFloatABI;
4093 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4117 Ty = CTy->getElementType();
4125 const Type *AlignTy =
nullptr;
4142 if (
getTarget().getTriple().isOSDarwin()) {
4144 TI.second = getParamTypeAlignment(Ty);
4152 const unsigned OverflowLimit = 8;
4180 if (isInt || IsSoftFloatABI) {
4189 if (isI64 || (isF64 && IsSoftFloatABI)) {
4190 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4191 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4195 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
4201 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4204 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4217 if (!(isInt || IsSoftFloatABI)) {
4226 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
4234 Builder.CreateAdd(NumRegs,
4235 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4246 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4254 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
4264 if (Align > OverflowAreaAlign) {
4274 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4301 llvm::IntegerType *i8 = CGF.
Int8Ty;
4302 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4303 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4304 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4346 static const unsigned GPRBits = 64;
4349 bool IsSoftFloatABI;
4353 bool IsQPXVectorTy(
const Type *Ty)
const {
4358 unsigned NumElements = VT->getNumElements();
4359 if (NumElements == 1)
4362 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4365 }
else if (VT->getElementType()->
4366 isSpecificBuiltinType(BuiltinType::Float)) {
4375 bool IsQPXVectorTy(
QualType Ty)
const {
4383 IsSoftFloatABI(SoftFloatABI) {}
4385 bool isPromotableTypeForABI(
QualType Ty)
const;
4393 uint64_t Members)
const override;
4411 if (IsQPXVectorTy(T) ||
4427 bool asReturnValue)
const override {
4431 bool isSwiftErrorInRegister()
const override {
4440 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX,
4454 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
4456 PPC64TargetCodeGenInfo(
CodeGenTypes &
CGT) : DefaultTargetCodeGenInfo(CGT) {}
4472 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4475 Ty = EnumTy->getDecl()->getIntegerType();
4484 switch (BT->getKind()) {
4485 case BuiltinType::Int:
4486 case BuiltinType::UInt:
4500 Ty = CTy->getElementType();
4504 if (IsQPXVectorTy(Ty)) {
4515 const Type *AlignAsType =
nullptr;
4519 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4522 AlignAsType = EltType;
4526 const Type *
Base =
nullptr;
4527 uint64_t Members = 0;
4528 if (!AlignAsType &&
Kind == ELFv2 &&
4533 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4538 }
else if (AlignAsType) {
4557 uint64_t &Members)
const {
4559 uint64_t NElements = AT->getSize().getZExtValue();
4564 Members *= NElements;
4573 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4574 for (
const auto &I : CXXRD->bases()) {
4579 uint64_t FldMembers;
4583 Members += FldMembers;
4587 for (
const auto *FD : RD->
fields()) {
4592 if (AT->getSize().getZExtValue() == 0)
4594 FT = AT->getElementType();
4604 uint64_t FldMembers;
4609 std::max(Members, FldMembers) : Members + FldMembers);
4623 Ty = CT->getElementType();
4639 QualType EltTy = VT->getElementType();
4640 unsigned NumElements =
4655 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4659 if (BT->getKind() == BuiltinType::Float ||
4660 BT->getKind() == BuiltinType::Double ||
4661 BT->getKind() == BuiltinType::LongDouble ||
4663 (BT->getKind() == BuiltinType::Float128))) {
4676 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4677 const Type *
Base, uint64_t Members)
const {
4687 return Members * NumRegs <= 8;
4703 else if (Size < 128) {
4713 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4717 const Type *Base =
nullptr;
4718 uint64_t Members = 0;
4719 if (
Kind == ELFv2 &&
4722 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4731 if (Bits > 0 && Bits <= 8 * GPRBits) {
4736 if (Bits <= GPRBits)
4738 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4742 uint64_t RegBits = ABIAlign * 8;
4743 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4745 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4754 TyAlign > ABIAlign);
4775 else if (Size < 128) {
4783 const Type *Base =
nullptr;
4784 uint64_t Members = 0;
4785 if (
Kind == ELFv2 &&
4788 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4794 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4799 if (Bits > GPRBits) {
4800 CoerceTy = llvm::IntegerType::get(
getVMContext(), GPRBits);
4801 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4804 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4820 TypeInfo.second = getParamTypeAlignment(Ty);
4832 if (EltSize < SlotSize) {
4834 SlotSize * 2, SlotSize,
4841 SlotSize - EltSize);
4843 2 * SlotSize - EltSize);
4874 llvm::IntegerType *i8 = CGF.
Int8Ty;
4875 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4876 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4877 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4914 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4950 ABIKind getABIKind()
const {
return Kind; }
4951 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4957 uint64_t Members)
const override;
4959 bool isIllegalVectorType(
QualType Ty)
const;
4966 it.info = classifyArgumentType(it.type);
4977 return Kind == Win64 ?
EmitMSVAArg(CGF, VAListAddr, Ty)
4978 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4979 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4986 bool asReturnValue)
const override {
4989 bool isSwiftErrorInRegister()
const override {
4994 unsigned elts)
const override;
5002 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5003 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5010 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
5012 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5014 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5017 llvm::Function *Fn = cast<llvm::Function>(GV);
5021 Fn->addFnAttr(
"sign-return-address",
5027 Fn->addFnAttr(
"sign-return-address-key",
5028 Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
5034 Fn->addFnAttr(
"branch-target-enforcement");
5038 class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
5040 WindowsAArch64TargetCodeGenInfo(
CodeGenTypes &
CGT, AArch64ABIInfo::ABIKind K)
5041 : AArch64TargetCodeGenInfo(CGT, K) {}
5043 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5046 void getDependentLibraryOption(llvm::StringRef Lib,
5048 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5051 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5053 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5057 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5059 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5060 if (GV->isDeclaration())
5062 addStackProbeTargetAttributes(D, GV, CGM);
5070 if (isIllegalVectorType(Ty)) {
5083 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 2);
5088 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 4);
5097 Ty = EnumTy->getDecl()->getIntegerType();
5115 if (IsEmpty || Size == 0) {
5121 if (IsEmpty && Size == 0)
5127 const Type *Base =
nullptr;
5128 uint64_t Members = 0;
5138 if (
getTarget().isRenderScriptTarget()) {
5142 if (
Kind == AArch64ABIInfo::AAPCS) {
5144 Alignment = Alignment < 128 ? 64 : 128;
5148 Size = llvm::alignTo(Size, 64);
5152 if (Alignment < 128 && Size == 128) {
5173 RetTy = EnumTy->getDecl()->getIntegerType();
5184 const Type *Base =
nullptr;
5185 uint64_t Members = 0;
5194 if (
getTarget().isRenderScriptTarget()) {
5198 Size = llvm::alignTo(Size, 64);
5202 if (Alignment < 128 && Size == 128) {
5213 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
5216 unsigned NumElements = VT->getNumElements();
5219 if (!llvm::isPowerOf2_32(NumElements))
5221 return Size != 64 && (Size != 128 || NumElements == 1);
5226 bool AArch64ABIInfo::isLegalVectorTypeForSwift(
CharUnits totalSize,
5228 unsigned elts)
const {
5229 if (!llvm::isPowerOf2_32(elts))
5237 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5243 if (BT->isFloatingPoint())
5247 if (VecSize == 64 || VecSize == 128)
5253 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5254 uint64_t Members)
const {
5255 return Members <= 4;
5266 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5270 unsigned NumRegs = 1;
5271 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5272 BaseTy = ArrTy->getElementType();
5273 NumRegs = ArrTy->getNumElements();
5275 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5299 int RegSize = IsIndirect ? 8 : TySize.
getQuantity();
5305 RegSize = llvm::alignTo(RegSize, 8);
5311 RegSize = 16 * NumRegs;
5323 UsingStack = CGF.
Builder.CreateICmpSGE(
5324 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
5326 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5335 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
5338 reg_offs = CGF.
Builder.CreateAdd(
5339 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
5341 reg_offs = CGF.
Builder.CreateAnd(
5342 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
5351 NewOffset = CGF.
Builder.CreateAdd(
5352 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
5358 InRegs = CGF.
Builder.CreateICmpSLE(
5359 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
5361 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5375 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
5383 MemTy = llvm::PointerType::getUnqual(MemTy);
5386 const Type *Base =
nullptr;
5387 uint64_t NumMembers = 0;
5389 if (IsHFA && NumMembers > 1) {
5394 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
5397 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5399 std::max(TyAlign, BaseTyInfo.second));
5404 BaseTyInfo.first.getQuantity() < 16)
5405 Offset = 16 - BaseTyInfo.first.getQuantity();
5407 for (
unsigned i = 0;
i < NumMembers; ++
i) {
5424 CharUnits SlotSize = BaseAddr.getAlignment();
5427 TySize < SlotSize) {
5450 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
5452 OnStackPtr = CGF.
Builder.CreateAdd(
5453 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
5455 OnStackPtr = CGF.
Builder.CreateAnd(
5456 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
5461 Address OnStackAddr(OnStackPtr,
5468 StackSize = StackSlotSize;
5470 StackSize = TySize.
alignTo(StackSlotSize);
5474 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
5480 TySize < StackSlotSize) {
5495 OnStackAddr, OnStackBlock,
"vaargs.addr");
5527 bool IsIndirect =
false;
5528 if (TyInfo.first.getQuantity() > 16) {
5529 const Type *Base =
nullptr;
5530 uint64_t Members = 0;
5535 TyInfo, SlotSize,
true);
5570 bool isEABI()
const {
5571 switch (
getTarget().getTriple().getEnvironment()) {
5572 case llvm::Triple::Android:
5573 case llvm::Triple::EABI:
5574 case llvm::Triple::EABIHF:
5575 case llvm::Triple::GNUEABI:
5576 case llvm::Triple::GNUEABIHF:
5577 case llvm::Triple::MuslEABI:
5578 case llvm::Triple::MuslEABIHF:
5585 bool isEABIHF()
const {
5586 switch (
getTarget().getTriple().getEnvironment()) {
5587 case llvm::Triple::EABIHF:
5588 case llvm::Triple::GNUEABIHF:
5589 case llvm::Triple::MuslEABIHF:
5596 ABIKind getABIKind()
const {
return Kind; }
5600 unsigned functionCallConv)
const;
5602 unsigned functionCallConv)
const;
5604 uint64_t Members)
const;
5606 bool isIllegalVectorType(
QualType Ty)
const;
5607 bool containsAnyFP16Vectors(
QualType Ty)
const;
5611 uint64_t Members)
const override;
5613 bool isEffectivelyAAPCS_VFP(
unsigned callConvention,
bool acceptHalf)
const;
5625 bool asReturnValue)
const override {
5628 bool isSwiftErrorInRegister()
const override {
5632 unsigned elts)
const override;
5640 const ARMABIInfo &getABIInfo()
const {
5648 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5649 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5661 unsigned getSizeOfUnwindException()
const override {
5662 if (getABIInfo().isEABI())
return 88;
5666 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5668 if (GV->isDeclaration())
5670 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5674 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
5679 switch (Attr->getInterrupt()) {
5680 case ARMInterruptAttr::Generic: Kind =
"";
break;
5681 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5682 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5683 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5684 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5685 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5688 llvm::Function *Fn = cast<llvm::Function>(GV);
5690 Fn->addFnAttr(
"interrupt", Kind);
5692 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5693 if (ABI == ARMABIInfo::APCS)
5699 llvm::AttrBuilder B;
5700 B.addStackAlignmentAttr(8);
5701 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5705 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5708 : ARMTargetCodeGenInfo(CGT, K) {}
5710 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5713 void getDependentLibraryOption(llvm::StringRef Lib,
5715 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5718 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5720 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5724 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5726 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5727 if (GV->isDeclaration())
5729 addStackProbeTargetAttributes(D, GV, CGM);
5755 if (isEABIHF() ||
getTarget().getTriple().isWatchABI())
5756 return llvm::CallingConv::ARM_AAPCS_VFP;
5758 return llvm::CallingConv::ARM_AAPCS;
5760 return llvm::CallingConv::ARM_APCS;
5766 switch (getABIKind()) {
5767 case APCS:
return llvm::CallingConv::ARM_APCS;
5768 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5769 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5770 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5772 llvm_unreachable(
"bad ABI kind");
5775 void ARMABIInfo::setCCs() {
5781 if (abiCC != getLLVMDefaultCC())
5792 if (Size == 64 || Size == 128) {
5802 uint64_t Members)
const {
5803 assert(Base &&
"Base class should be set for homogeneous aggregate");
5807 if (!
getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
5809 llvm::Type *NewVecTy = llvm::VectorType::get(
5811 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
5819 unsigned functionCallConv)
const {
5829 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
false);
5834 if (isIllegalVectorType(Ty))
5835 return coerceIllegalVector(Ty);
5851 Ty = EnumTy->getDecl()->getIntegerType();
5869 const Type *Base =
nullptr;
5870 uint64_t Members = 0;
5872 return classifyHomogeneousAggregate(Ty, Base, Members);
5873 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5877 const Type *Base =
nullptr;
5878 uint64_t Members = 0;
5880 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5887 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5900 uint64_t ABIAlign = 4;
5902 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5903 getABIKind() == ARMABIInfo::AAPCS) {
5910 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5913 TyAlign > ABIAlign);
5918 if (
getTarget().isRenderScriptTarget()) {
5939 llvm::LLVMContext &VMContext) {
5971 if (!RT)
return false;
5982 bool HadField =
false;
5985 i != e; ++
i, ++idx) {
6024 unsigned functionCallConv)
const {
6028 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
true);
6039 (VT->getElementType()->isFloat16Type() ||
6040 VT->getElementType()->isHalfType()))
6041 return coerceIllegalVector(RetTy);
6058 RetTy = EnumTy->getDecl()->getIntegerType();
6065 if (getABIKind() == APCS) {
6099 const Type *Base =
nullptr;
6100 uint64_t Members = 0;
6102 return classifyHomogeneousAggregate(RetTy, Base, Members);
6111 if (
getTarget().isRenderScriptTarget()) {
6124 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6127 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6135 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
6141 (VT->getElementType()->isFloat16Type() ||
6142 VT->getElementType()->isHalfType()))
6150 unsigned NumElements = VT->getNumElements();
6152 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6156 unsigned NumElements = VT->getNumElements();
6159 if (!llvm::isPowerOf2_32(NumElements))
6169 bool ARMABIInfo::containsAnyFP16Vectors(
QualType Ty)
const {
6171 uint64_t NElements = AT->getSize().getZExtValue();
6174 return containsAnyFP16Vectors(AT->getElementType());
6179 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6181 return containsAnyFP16Vectors(B.getType());
6186 return FD && containsAnyFP16Vectors(FD->getType());
6193 return (VT->getElementType()->isFloat16Type() ||
6194 VT->getElementType()->isHalfType());
6199 bool ARMABIInfo::isLegalVectorTypeForSwift(
CharUnits vectorSize,
6201 unsigned numElts)
const {
6202 if (!llvm::isPowerOf2_32(numElts))
6204 unsigned size =
getDataLayout().getTypeStoreSizeInBits(eltTy);
6213 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
6217 if (BT->getKind() == BuiltinType::Float ||
6218 BT->getKind() == BuiltinType::Double ||
6219 BT->getKind() == BuiltinType::LongDouble)
6223 if (VecSize == 64 || VecSize == 128)
6229 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
6230 uint64_t Members)
const {
6231 return Members <= 4;
6234 bool ARMABIInfo::isEffectivelyAAPCS_VFP(
unsigned callConvention,
6235 bool acceptHalf)
const {
6238 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6240 return (getABIKind() == AAPCS_VFP) ||
6241 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6259 bool IsIndirect =
false;
6260 const Type *Base =
nullptr;
6261 uint64_t Members = 0;
6268 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6276 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6277 getABIKind() == ARMABIInfo::AAPCS) {
6280 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6288 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
6299 class NVPTXABIInfo :
public ABIInfo {
6316 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6318 bool shouldEmitStaticExternCAliases()
const override;
6323 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
6338 return isUnsupportedType(Context, AT->getElementType());
6345 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6347 if (isUnsupportedType(Context, I.getType()))
6351 if (isUnsupportedType(Context, I->getType()))
6358 llvm::LLVMContext &LLVMContext,
6363 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
6364 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
6365 const uint64_t NumElements = (Size + Div - 1) / Div;
6384 RetTy = EnumTy->getDecl()->getIntegerType();
6393 Ty = EnumTy->getDecl()->getIntegerType();
6418 llvm_unreachable(
"NVPTX does not support varargs");
6421 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6423 if (GV->isDeclaration())
6425 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6428 llvm::Function *F = cast<llvm::Function>(GV);
6434 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6437 addNVVMMetadata(F,
"kernel", 1);
6439 F->addFnAttr(llvm::Attribute::NoInline);
6448 if (FD->
hasAttr<CUDAGlobalAttr>()) {
6450 addNVVMMetadata(F,
"kernel", 1);
6452 if (CUDALaunchBoundsAttr *
Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
6454 llvm::APSInt MaxThreads(32);
6455 MaxThreads =
Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
6457 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
6462 if (
Attr->getMinBlocks()) {
6463 llvm::APSInt MinBlocks(32);
6464 MinBlocks =
Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
6467 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
6473 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6475 llvm::Module *M = F->getParent();
6479 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
6481 llvm::Metadata *MDVals[] = {
6482 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6483 llvm::ConstantAsMetadata::get(
6484 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6486 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6489 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
6507 bool isPromotableIntegerType(
QualType Ty)
const;
6508 bool isCompoundType(
QualType Ty)
const;
6509 bool isVectorArgumentType(
QualType Ty)
const;
6510 bool isFPArgumentType(
QualType Ty)
const;
6520 I.info = classifyArgumentType(I.type);
6527 bool asReturnValue)
const override {
6530 bool isSwiftErrorInRegister()
const override {
6543 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
6546 Ty = EnumTy->getDecl()->getIntegerType();
6554 switch (BT->getKind()) {
6555 case BuiltinType::Int:
6556 case BuiltinType::UInt:
6564 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
6570 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
6571 return (HasVector &&
6576 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
6578 switch (BT->getKind()) {
6579 case BuiltinType::Float:
6580 case BuiltinType::Double:
6595 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6596 for (
const auto &I : CXXRD->bases()) {
6605 Found = GetSingleElementType(Base);
6609 for (
const auto *FD : RD->
fields()) {
6621 Found = GetSingleElementType(FD->getType());
6652 bool InFPRs =
false;
6653 bool IsVector =
false;
6657 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6662 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6663 IsVector = ArgTy->isVectorTy();
6664 UnpaddedSize = TyInfo.first;
6665 DirectAlign = TyInfo.second;
6668 if (IsVector && UnpaddedSize > PaddedSize)
6670 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
6672 CharUnits Padding = (PaddedSize - UnpaddedSize);
6676 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
6693 "overflow_arg_area");
6701 unsigned MaxRegs, RegCountField, RegSaveIndex;
6712 RegPadding = Padding;
6718 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6725 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6732 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
6734 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
6737 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
6749 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6751 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6772 "overflow_arg_area");
6779 MemAddr, InMemBlock,
"va_arg.addr");
6791 if (isVectorArgumentType(RetTy))
6805 if (isPromotableIntegerType(Ty))
6812 QualType SingleElementTy = GetSingleElementType(Ty);
6813 if (isVectorArgumentType(SingleElementTy) &&
6814 getContext().getTypeSize(SingleElementTy) == Size)
6818 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6831 if (isFPArgumentType(SingleElementTy)) {
6832 assert(Size == 32 || Size == 64);
6843 if (isCompoundType(Ty))
6859 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6865 void MSP430TargetCodeGenInfo::setTargetAttributes(
6867 if (GV->isDeclaration())
6869 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6870 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
6875 llvm::Function *F = cast<llvm::Function>(GV);
6878 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6881 F->addFnAttr(llvm::Attribute::NoInline);
6882 F->addFnAttr(
"interrupt", llvm::utostr(InterruptAttr->getNumber()));
6892 class MipsABIInfo :
public ABIInfo {
6894 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6895 void CoerceToIntArgs(uint64_t TySize,
6902 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6903 StackAlignInBytes(IsO32 ? 8 : 16) {}
6914 unsigned SizeOfUnwindException;
6918 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6924 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6926 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6928 llvm::Function *Fn = cast<llvm::Function>(GV);
6930 if (FD->
hasAttr<MipsLongCallAttr>())
6931 Fn->addFnAttr(
"long-call");
6932 else if (FD->
hasAttr<MipsShortCallAttr>())
6933 Fn->addFnAttr(
"short-call");
6936 if (GV->isDeclaration())
6939 if (FD->
hasAttr<Mips16Attr>()) {
6940 Fn->addFnAttr(
"mips16");
6942 else if (FD->
hasAttr<NoMips16Attr>()) {
6943 Fn->addFnAttr(
"nomips16");
6946 if (FD->
hasAttr<MicroMipsAttr>())
6947 Fn->addFnAttr(
"micromips");
6948 else if (FD->
hasAttr<NoMicroMipsAttr>())
6949 Fn->addFnAttr(
"nomicromips");
6951 const MipsInterruptAttr *
Attr = FD->
getAttr<MipsInterruptAttr>();
6956 switch (Attr->getInterrupt()) {
6957 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6958 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6959 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6960 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6961 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6962 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6963 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6964 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6965 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6968 Fn->addFnAttr(
"interrupt", Kind);
6975 unsigned getSizeOfUnwindException()
const override {
6976 return SizeOfUnwindException;
6981 void MipsABIInfo::CoerceToIntArgs(
6983 llvm::IntegerType *IntTy =
6984 llvm::IntegerType::get(
getVMContext(), MinABIStackAlignInBytes * 8);
6987 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6988 ArgList.push_back(IntTy);
6991 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6994 ArgList.push_back(llvm::IntegerType::get(
getVMContext(), R));
7003 CoerceToIntArgs(TySize, ArgList);
7014 CoerceToIntArgs(TySize, ArgList);
7020 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
7022 uint64_t LastOffset = 0;
7024 llvm::IntegerType *I64 = llvm::IntegerType::get(
getVMContext(), 64);
7029 i != e; ++
i, ++idx) {
7033 if (!BT || BT->
getKind() != BuiltinType::Double)
7041 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
7042 ArgList.push_back(I64);
7045 ArgList.push_back(llvm::Type::getDoubleTy(
getVMContext()));
7046 LastOffset = Offset + 64;
7049 CoerceToIntArgs(TySize - LastOffset, IntArgList);
7050 ArgList.append(IntArgList.begin(), IntArgList.end());
7055 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
7057 if (OrigOffset + MinABIStackAlignInBytes > Offset)
7060 return llvm::IntegerType::get(
getVMContext(), (Offset - OrigOffset) * 8);
7067 uint64_t OrigOffset =
Offset;
7072 (uint64_t)StackAlignInBytes);
7073 unsigned CurrOffset = llvm::alignTo(Offset, Align);
7074 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
7082 Offset = OrigOffset + MinABIStackAlignInBytes;
7091 getPaddingType(OrigOffset, CurrOffset));
7098 Ty = EnumTy->getDecl()->getIntegerType();
7102 return extendType(Ty);
7105 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
7109 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
7129 for (; b != e; ++
b) {
7146 CoerceToIntArgs(Size, RTList);
7158 if (!IsO32 && Size == 0)
7182 RetTy = EnumTy->getDecl()->getIntegerType();
7212 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7214 bool DidPromote =
false;
7234 TyInfo, ArgSlotSize,
true);
7305 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7307 if (GV->isDeclaration())
7309 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7311 auto *Fn = cast<llvm::Function>(GV);
7313 if (FD->getAttr<AVRInterruptAttr>())
7314 Fn->addFnAttr(
"interrupt");
7316 if (FD->getAttr<AVRSignalAttr>())
7317 Fn->addFnAttr(
"signal");
7330 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
7333 : DefaultTargetCodeGenInfo(CGT) {}
7335 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7339 void TCETargetCodeGenInfo::setTargetAttributes(
7341 if (GV->isDeclaration())
7343 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7346 llvm::Function *F = cast<llvm::Function>(GV);
7349 if (FD->
hasAttr<OpenCLKernelAttr>()) {
7351 F->addFnAttr(llvm::Attribute::NoInline);
7352 const ReqdWorkGroupSizeAttr *
Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
7355 llvm::LLVMContext &Context = F->getContext();
7356 llvm::NamedMDNode *OpenCLMetadata =
7358 "opencl.kernel_wg_size_info");
7361 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7364 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7365 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7367 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7368 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7370 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7371 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7377 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7378 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7392 class HexagonABIInfo :
public ABIInfo {
7432 Ty = EnumTy->getDecl()->getIntegerType();
7470 RetTy = EnumTy->getDecl()->getIntegerType();
7510 class LanaiABIInfo :
public DefaultABIInfo {
7514 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
7537 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
7539 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7541 if (SizeInRegs == 0)
7544 if (SizeInRegs > State.FreeRegs) {
7549 State.FreeRegs -= SizeInRegs;
7555 CCState &State)
const {
7557 if (State.FreeRegs) {
7565 const unsigned MinABIStackAlignInBytes = 4;
7569 MinABIStackAlignInBytes);
7573 CCState &State)
const {
7579 return getIndirectResult(Ty,
false, State);
7588 return getIndirectResult(Ty,
true, State);
7596 if (SizeInRegs <= State.FreeRegs) {
7597 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7599 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7600 State.FreeRegs -= SizeInRegs;
7605 return getIndirectResult(Ty,
true, State);
7610 Ty = EnumTy->getDecl()->getIntegerType();
7612 bool InReg = shouldUseInReg(Ty, State);
7637 class AMDGPUABIInfo final :
public DefaultABIInfo {
7639 static const unsigned MaxNumRegsForArgsRet = 16;
7641 unsigned numRegsForType(
QualType Ty)
const;
7645 uint64_t Members)
const override;
7649 DefaultABIInfo(CGT) {}
7658 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
7662 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7663 const Type *Base, uint64_t Members)
const {
7667 return Members * NumRegs <= MaxNumRegsForArgsRet;
7671 unsigned AMDGPUABIInfo::numRegsForType(
QualType Ty)
const {
7672 unsigned NumRegs = 0;
7677 QualType EltTy = VT->getElementType();
7682 return (VT->getNumElements() + 1) / 2;
7684 unsigned EltNumRegs = (EltSize + 31) / 32;
7685 return EltNumRegs * VT->getNumElements();
7693 QualType FieldTy = Field->getType();
7694 NumRegs += numRegsForType(FieldTy);
7700 return (
getContext().getTypeSize(Ty) + 31) / 32;
7709 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7711 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7712 Arg.info = classifyKernelArgumentType(Arg.type);
7751 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7778 unsigned &NumRegsLeft)
const {
7779 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
7808 unsigned NumRegs = (Size + 31) / 32;
7809 NumRegsLeft -=
std::min(NumRegsLeft, NumRegs);
7822 if (NumRegsLeft > 0) {
7823 unsigned NumRegs = numRegsForType(Ty);
7824 if (NumRegsLeft >= NumRegs) {
7825 NumRegsLeft -= NumRegs;
7834 unsigned NumRegs = numRegsForType(Ty);
7835 NumRegsLeft -=
std::min(NumRegs, NumRegsLeft);
7845 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7847 unsigned getOpenCLKernelCallingConv()
const override;
7850 llvm::PointerType *T,
QualType QT)
const override;
7852 LangAS getASTAllocaAddressSpace()
const override {
7857 const VarDecl *D)
const override;
7860 llvm::AtomicOrdering Ordering,
7861 llvm::LLVMContext &Ctx)
const override;
7864 llvm::Function *BlockInvokeFunc,
7866 bool shouldEmitStaticExternCAliases()
const override;
7872 llvm::GlobalValue *GV) {
7876 return D->
hasAttr<OpenCLKernelAttr>() ||
7877 (isa<FunctionDecl>(D) && D->
hasAttr<CUDAGlobalAttr>()) ||
7879 (D->
hasAttr<CUDADeviceAttr>() || D->
hasAttr<CUDAConstantAttr>() ||
7880 D->
hasAttr<HIPPinnedShadowAttr>()));
7884 llvm::GlobalValue *GV) {
7888 return isa<VarDecl>(D) && D->
hasAttr<HIPPinnedShadowAttr>();
7891 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7895 GV->setDSOLocal(
false);
7898 GV->setDSOLocal(
true);
7901 if (GV->isDeclaration())
7903 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7907 llvm::Function *F = cast<llvm::Function>(GV);
7910 FD->
getAttr<ReqdWorkGroupSizeAttr>() :
nullptr;
7914 (M.
getTriple().getOS() == llvm::Triple::AMDHSA))
7915 F->addFnAttr(
"amdgpu-implicitarg-num-bytes",
"56");
7917 const auto *FlatWGS = FD->
getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7918 if (ReqdWGS || FlatWGS) {
7922 Min = FlatWGS->getMin()
7925 Max = FlatWGS->getMax()
7929 if (ReqdWGS && Min == 0 && Max == 0)
7930 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7933 assert(Min <= Max &&
"Min must be less than or equal Max");
7935 std::string AttrVal = llvm::utostr(Min) +
"," + llvm::utostr(Max);
7936 F->addFnAttr(
"amdgpu-flat-work-group-size", AttrVal);
7938 assert(Max == 0 &&
"Max must be zero");
7941 if (
const auto *
Attr = FD->
getAttr<AMDGPUWavesPerEUAttr>()) {
7943 Attr->getMin()->EvaluateKnownConstInt(M.
getContext()).getExtValue();
7944 unsigned Max =
Attr->getMax() ?
Attr->getMax()
7950 assert((Max == 0 || Min <= Max) &&
"Min must be less than or equal Max");
7952 std::string AttrVal = llvm::utostr(Min);
7954 AttrVal = AttrVal +
"," + llvm::utostr(Max);
7955 F->addFnAttr(
"amdgpu-waves-per-eu", AttrVal);
7957 assert(Max == 0 &&
"Max must be zero");
7960 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
7961 unsigned NumSGPR =
Attr->getNumSGPR();
7964 F->addFnAttr(
"amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7967 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
7968 uint32_t NumVGPR =
Attr->getNumVGPR();
7971 F->addFnAttr(
"amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7975 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7976 return llvm::CallingConv::AMDGPU_KERNEL;
7984 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7988 return llvm::ConstantPointerNull::get(PT);
7991 auto NPT = llvm::PointerType::get(PT->getElementType(),
7993 return llvm::ConstantExpr::getAddrSpaceCast(
7994 llvm::ConstantPointerNull::get(NPT), PT);
7998 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
8002 "Address space agnostic languages only");
8006 return DefaultGlobalAS;
8015 return ConstAS.getValue();
8017 return DefaultGlobalAS;
8021 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
const LangOptions &LangOpts,
8023 llvm::AtomicOrdering Ordering,
8024 llvm::LLVMContext &Ctx)
const {
8040 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
8042 Name = Twine(Twine(Name) + Twine(
"-")).str();
8044 Name = Twine(Twine(Name) + Twine(
"one-as")).str();
8047 return Ctx.getOrInsertSyncScopeID(Name);
8050 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
8056 FT = getABIInfo().getContext().adjustFunctionType(
8067 class SparcV8ABIInfo :
public DefaultABIInfo {
8130 class SparcV9ABIInfo :
public ABIInfo {
8151 struct CoerceBuilder {
8152 llvm::LLVMContext &Context;
8153 const llvm::DataLayout &DL;
8158 CoerceBuilder(llvm::LLVMContext &
c,
const llvm::DataLayout &dl)
8159 : Context(c), DL(dl), Size(0), InReg(
false) {}
8162 void pad(uint64_t ToSize) {
8163 assert(ToSize >= Size &&
"Cannot remove elements");
8168 uint64_t Aligned = llvm::alignTo(Size, 64);
8169 if (Aligned > Size && Aligned <= ToSize) {
8170 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
8175 while (Size + 64 <= ToSize) {
8176 Elems.push_back(llvm::Type::getInt64Ty(Context));
8181 if (Size < ToSize) {
8182 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
8196 Elems.push_back(Ty);
8197 Size = Offset + Bits;
8201 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
8202 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
8203 for (
unsigned i = 0, e = StrTy->getNumElements();
i != e; ++
i) {
8205 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(
i);
8206 switch (ElemTy->getTypeID()) {
8207 case llvm::Type::StructTyID:
8208 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
8210 case llvm::Type::FloatTyID:
8211 addFloat(ElemOffset, ElemTy, 32);
8213 case llvm::Type::DoubleTyID:
8214 addFloat(ElemOffset, ElemTy, 64);
8216 case llvm::Type::FP128TyID:
8217 addFloat(ElemOffset, ElemTy, 128);
8219 case llvm::Type::PointerTyID:
8220 if (ElemOffset % 64 == 0) {
8222 Elems.push_back(ElemTy);
8233 bool isUsableType(llvm::StructType *Ty)
const {
8234 return llvm::makeArrayRef(Elems) == Ty->elements();
8239 if (Elems.size() == 1)
8240 return Elems.front();
8242 return llvm::StructType::get(Context, Elems);
8257 if (Size > SizeLimit)
8262 Ty = EnumTy->getDecl()->getIntegerType();
8265 if (Size < 64 && Ty->isIntegerType())
8279 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(
CGT.
ConvertType(Ty));
8284 CB.addStruct(0, StrTy);
8285 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8288 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8307 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8317 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8348 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
8380 llvm::IntegerType *i8 = CGF.
Int8Ty;
8381 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8382 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8409 class ARCABIInfo :
public DefaultABIInfo {
8411 using DefaultABIInfo::DefaultABIInfo;
8418 if (!State.FreeRegs)
8424 if (sz < State.FreeRegs)
8425 State.FreeRegs -= sz;
8441 updateState(I.info, I.type, State);
8465 const unsigned MinABIStackAlignInBytes = 4;
8468 TypeAlign > MinABIStackAlignInBytes);
8479 uint8_t FreeRegs)
const {
8485 return getIndirectByRef(Ty, FreeRegs > 0);
8488 return getIndirectByValue(Ty);
8493 Ty = EnumTy->getDecl()->getIntegerType();
8495 auto SizeInRegs = llvm::alignTo(
getContext().getTypeSize(Ty), 32) / 32;
8500 return getIndirectByValue(Ty);
8508 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8510 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8512 return FreeRegs >= SizeInRegs ?
8529 auto RetSize = llvm::alignTo(
getContext().getTypeSize(RetTy), 32) / 32;
8531 return getIndirectByRef(RetTy,
true);
8602 class TypeStringCache {
8603 enum Status {NonRecursive, Recursive,
Incomplete, IncompleteUsed};
8607 std::string Swapped;
8610 std::map<const IdentifierInfo *, struct Entry> Map;
8611 unsigned IncompleteCount;
8612 unsigned IncompleteUsedCount;
8614 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8624 class FieldEncoding {
8628 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8629 StringRef str() {
return Enc; }
8630 bool operator<(
const FieldEncoding &rhs)
const {
8631 if (HasName != rhs.HasName)
return HasName;
8632 return Enc < rhs.Enc;
8636 class XCoreABIInfo :
public DefaultABIInfo {
8644 mutable TypeStringCache TSC;
8648 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8668 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8669 AI.setCoerceToType(ArgTy);
8670 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8674 switch (AI.getKind()) {
8678 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8680 Val =
Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8688 ArgSize = ArgSize.
alignTo(SlotSize);
8712 std::string StubEnc) {
8716 assert( (E.Str.empty() || E.State == Recursive) &&
8717 "Incorrectly use of addIncomplete");
8718 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
8719 E.Swapped.swap(E.Str);
8720 E.Str.swap(StubEnc);
8729 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
8732 auto I = Map.find(ID);
8733 assert(I != Map.end() &&
"Entry not present");
8734 Entry &E = I->second;
8736 E.State == IncompleteUsed) &&
8737 "Entry must be an incomplete type");
8738 bool IsRecursive =
false;
8739 if (E.State == IncompleteUsed) {
8742 --IncompleteUsedCount;
8744 if (E.Swapped.empty())
8748 E.Swapped.swap(E.Str);
8750 E.State = Recursive;
8758 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
8760 if (!ID || IncompleteUsedCount)
8763 if (IsRecursive && !E.Str.empty()) {
8764 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8765 "This is not the same Recursive entry");
8771 assert(E.Str.empty() &&
"Entry already present");
8773 E.State = IsRecursive? Recursive : NonRecursive;
8782 auto I = Map.find(ID);
8785 Entry &E = I->second;
8786 if (E.State == Recursive && IncompleteCount)
8791 E.State = IncompleteUsed;
8792 ++IncompleteUsedCount;
8813 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8817 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
8818 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8819 llvm::MDString::get(Ctx, Enc.str())};
8820 llvm::NamedMDNode *MD =
8821 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
8822 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8835 unsigned getOpenCLKernelCallingConv()
const override;
8843 DefaultABIInfo SPIRABI(CGM.
getTypes());
8844 SPIRABI.computeInfo(FI);
8849 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
8850 return llvm::CallingConv::SPIR_KERNEL;
8855 TypeStringCache &TSC);
8863 TypeStringCache &TSC) {
8864 for (
const auto *Field : RD->
fields()) {
8867 Enc += Field->getName();
8869 if (Field->isBitField()) {
8871 llvm::raw_svector_ostream OS(Enc);
8872 OS << Field->getBitWidthValue(CGM.
getContext());
8875 if (!
appendType(Enc, Field->getType(), CGM, TSC))
8877 if (Field->isBitField())
8880 FE.emplace_back(!Field->getName().empty(), Enc);
8892 StringRef TypeString = TSC.lookupStr(ID);
8893 if (!TypeString.empty()) {
8899 size_t Start = Enc.size();
8907 bool IsRecursive =
false;
8914 std::string StubEnc(Enc.substr(Start).str());
8916 TSC.addIncomplete(ID, std::move(StubEnc));
8918 (void) TSC.removeIncomplete(ID);
8921 IsRecursive = TSC.removeIncomplete(ID);
8927 unsigned E = FE.size();
8928 for (
unsigned I = 0; I != E; ++I) {
8935 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8941 TypeStringCache &TSC,
8944 StringRef TypeString = TSC.lookupStr(ID);
8945 if (!TypeString.empty()) {
8950 size_t Start = Enc.size();
8959 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8961 SmallStringEnc EnumEnc;
8963 EnumEnc += I->getName();
8965 I->getInitVal().toString(EnumEnc);
8967 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8970 unsigned E = FE.size();
8971 for (
unsigned I = 0; I != E; ++I) {
8978 TSC.addIfComplete(ID, Enc.substr(Start),
false);
8986 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
8994 Enc += Table[Lookup];
8999 const char *EncType;
9001 case BuiltinType::Void:
9004 case BuiltinType::Bool:
9007 case BuiltinType::Char_U:
9010 case BuiltinType::UChar:
9013 case BuiltinType::SChar:
9016 case BuiltinType::UShort:
9019 case BuiltinType::Short:
9022 case BuiltinType::UInt:
9025 case BuiltinType::Int:
9028 case BuiltinType::ULong:
9031 case BuiltinType::Long:
9034 case BuiltinType::ULongLong:
9037 case BuiltinType::LongLong:
9040 case BuiltinType::Float:
9043 case BuiltinType::Double:
9046 case BuiltinType::LongDouble:
9059 TypeStringCache &TSC) {
9071 TypeStringCache &TSC, StringRef NoSizeEnc) {
9076 CAT->getSize().toStringUnsigned(Enc);
9092 TypeStringCache &TSC) {
9099 auto I = FPT->param_type_begin();
9100 auto E = FPT->param_type_end();
9109 if (FPT->isVariadic())
9112 if (FPT->isVariadic())
9126 TypeStringCache &TSC) {
9163 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
9166 return appendType(Enc, FD->getType(), CGM, TSC);
9169 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
9172 QualType QT = VD->getType().getCanonicalType();
9189 class RISCVABIInfo :
public DefaultABIInfo {
9197 static const int NumArgGPRs = 8;
9198 static const int NumArgFPRs = 8;
9207 : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
9214 int &ArgFPRsLeft)
const;
9224 CharUnits &Field2Off,
int &NeededArgGPRs,
9225 int &NeededArgFPRs)
const;
9250 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
9251 int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
9256 bool IsFixed = ArgNum < NumFixedArgs;
9267 bool RISCVABIInfo::detectFPCCEligibleStructHelper(
QualType Ty,
CharUnits CurOff,
9275 if (IsInt || IsFloat) {
9277 if (IsInt && Size > XLen)
9282 if (IsFloat && (Size > FLen || Size < 32))
9286 if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
9304 QualType EltTy = CTy->getElementType();
9309 assert(CurOff.
isZero() &&
"Unexpected offset for first field");
9310 Field2Ty = Field1Ty;
9316 uint64_t ArraySize = ATy->getSize().getZExtValue();
9317 QualType EltTy = ATy->getElementType();
9319 for (uint64_t
i = 0;
i < ArraySize; ++
i) {
9320 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
9321 Field1Off, Field2Ty, Field2Off);
9340 int ZeroWidthBitFieldCount = 0;
9343 uint64_t FieldOffInBits = Layout.
getFieldOffset(FD->getFieldIndex());
9345 if (FD->isBitField()) {
9346 unsigned BitWidth = FD->getBitWidthValue(
getContext());
9349 if (
getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
9351 if (BitWidth == 0) {
9352 ZeroWidthBitFieldCount++;
9357 bool Ret = detectFPCCEligibleStructHelper(
9358 QTy, CurOff +
getContext().toCharUnitsFromBits(FieldOffInBits),
9359 Field1Ty, Field1Off, Field2Ty, Field2Off);
9366 if (Field2Ty && ZeroWidthBitFieldCount > 0)
9369 return Field1Ty !=
nullptr;
9384 int &NeededArgFPRs)
const {
9389 bool IsCandidate = detectFPCCEligibleStructHelper(
9392 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
9393 return IsCandidate =
false;
9396 if (Field1Ty && Field1Ty->isFloatingPointTy())
9400 if (Field2Ty && Field2Ty->isFloatingPointTy())
9410 ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
9416 CoerceElts.push_back(llvm::ArrayType::get(
9419 CoerceElts.push_back(Field1Ty);
9420 UnpaddedCoerceElts.push_back(Field1Ty);
9425 UnpaddedCoerceElts[0]);
9435 if (Field2Off > Field2OffNoPadNoPack)
9436 Padding = Field2Off - Field2OffNoPadNoPack;
9437 else if (Field2Off != Field2Align && Field2Off > Field1Size)
9438 Padding = Field2Off - Field1Size;
9443 CoerceElts.push_back(llvm::ArrayType::get(
9446 CoerceElts.push_back(Field2Ty);
9447 UnpaddedCoerceElts.push_back(Field2Ty);
9450 llvm::StructType::get(
getVMContext(), CoerceElts, IsPacked);
9451 auto UnpaddedCoerceToType =
9452 llvm::StructType::get(
getVMContext(), UnpaddedCoerceElts, IsPacked);
9459 int &ArgFPRsLeft)
const {
9460 assert(ArgGPRsLeft <= NumArgGPRs &&
"Arg GPR tracking underflow");
9479 if (IsFixed && Ty->
isFloatingType() && FLen >= Size && ArgFPRsLeft) {
9486 if (IsFixed && Ty->
isComplexType() && FLen && ArgFPRsLeft >= 2) {
9488 if (
getContext().getTypeSize(EltTy) <= FLen) {
9502 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
9503 NeededArgGPRs, NeededArgFPRs);
9504 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
9505 NeededArgFPRs <= ArgFPRsLeft) {
9506 ArgGPRsLeft -= NeededArgGPRs;
9507 ArgFPRsLeft -= NeededArgFPRs;
9508 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
9514 bool MustUseStack =
false;
9518 int NeededArgGPRs = 1;
9519 if (!IsFixed && NeededAlign == 2 * XLen)
9520 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
9521 else if (Size > XLen && Size <= 2 * XLen)
9524 if (NeededArgGPRs > ArgGPRsLeft) {
9525 MustUseStack =
true;
9526 NeededArgGPRs = ArgGPRsLeft;
9529 ArgGPRsLeft -= NeededArgGPRs;
9534 Ty = EnumTy->getDecl()->getIntegerType();
9538 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
9539 return extendType(Ty);
9547 if (Size <= 2 * XLen) {
9555 }
else if (Alignment == 2 * XLen) {
9570 int ArgGPRsLeft = 2;
9571 int ArgFPRsLeft = FLen ? 2 : 0;
9590 std::pair<CharUnits, CharUnits> SizeAndAlign =
9594 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
9615 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
9617 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
9620 const auto *
Attr = FD->getAttr<RISCVInterruptAttr>();
9625 switch (
Attr->getInterrupt()) {
9626 case RISCVInterruptAttr::user: Kind =
"user";
break;
9627 case RISCVInterruptAttr::supervisor: Kind =
"supervisor";
break;
9628 case RISCVInterruptAttr::machine: Kind =
"machine";
break;
9631 auto *Fn = cast<llvm::Function>(GV);
9633 Fn->addFnAttr(
"interrupt", Kind);
9643 return getTriple().supportsCOMDAT();
9647 if (TheTargetCodeGenInfo)
9648 return *TheTargetCodeGenInfo;
9652 this->TheTargetCodeGenInfo.reset(
P);
9657 switch (Triple.getArch()) {
9659 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
9661 case llvm::Triple::le32:
9662 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9663 case llvm::Triple::mips:
9664 case llvm::Triple::mipsel:
9665 if (Triple.getOS() == llvm::Triple::NaCl)
9666 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9667 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
9669 case llvm::Triple::mips64:
9670 case llvm::Triple::mips64el:
9671 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
9673 case llvm::Triple::avr:
9674 return SetCGInfo(
new AVRTargetCodeGenInfo(Types));
9676 case llvm::Triple::aarch64:
9677 case llvm::Triple::aarch64_be: {
9678 AArch64ABIInfo::ABIKind
Kind = AArch64ABIInfo::AAPCS;
9679 if (
getTarget().getABI() ==
"darwinpcs")
9680 Kind = AArch64ABIInfo::DarwinPCS;
9681 else if (Triple.isOSWindows())
9683 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
9685 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
9688 case llvm::Triple::wasm32:
9689 case llvm::Triple::wasm64:
9690 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
9692 case llvm::Triple::arm:
9693 case llvm::Triple::armeb:
9694 case llvm::Triple::thumb:
9695 case llvm::Triple::thumbeb: {
9696 if (Triple.getOS() == llvm::Triple::Win32) {
9698 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
9701 ARMABIInfo::ABIKind
Kind = ARMABIInfo::AAPCS;
9703 if (ABIStr ==
"apcs-gnu")
9704 Kind = ARMABIInfo::APCS;
9705 else if (ABIStr ==
"aapcs16")
9706 Kind = ARMABIInfo::AAPCS16_VFP;
9707 else if (CodeGenOpts.FloatABI ==
"hard" ||
9708 (CodeGenOpts.FloatABI !=
"soft" &&
9709 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
9710 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
9711 Triple.getEnvironment() == llvm::Triple::EABIHF)))
9712 Kind = ARMABIInfo::AAPCS_VFP;
9714 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
9717 case llvm::Triple::ppc:
9719 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI ==
"soft"));
9720 case llvm::Triple::ppc64:
9721 if (Triple.isOSBinFormatELF()) {
9722 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv1;
9724 Kind = PPC64_SVR4_ABIInfo::ELFv2;
9726 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9728 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9731 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
9732 case llvm::Triple::ppc64le: {
9733 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
9734 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv2;
9736 Kind = PPC64_SVR4_ABIInfo::ELFv1;
9738 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9740 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9744 case llvm::Triple::nvptx:
9745 case llvm::Triple::nvptx64:
9746 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
9748 case llvm::Triple::msp430:
9749 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
9751 case llvm::Triple::riscv32:
9752 case llvm::Triple::riscv64: {
9755 unsigned ABIFLen = 0;
9756 if (ABIStr.endswith(
"f"))
9758 else if (ABIStr.endswith(
"d"))
9760 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
9763 case llvm::Triple::systemz: {
9765 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
9768 case llvm::Triple::tce:
9769 case llvm::Triple::tcele:
9770 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
9772 case llvm::Triple::x86: {
9773 bool IsDarwinVectorABI = Triple.isOSDarwin();
9774 bool RetSmallStructInRegABI =
9775 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
9776 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
9778 if (Triple.getOS() == llvm::Triple::Win32) {
9779 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
9780 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9781 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
9783 return SetCGInfo(
new X86_32TargetCodeGenInfo(
9784 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9785 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
9786 CodeGenOpts.FloatABI ==
"soft"));
9790 case llvm::Triple::x86_64: {
9794 ? X86AVXABILevel::AVX512
9797 switch (Triple.getOS()) {
9798 case llvm::Triple::Win32:
9799 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
9801 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
9804 case llvm::Triple::hexagon:
9805 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
9806 case llvm::Triple::lanai:
9807 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
9808 case llvm::Triple::r600:
9809 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9810 case llvm::Triple::amdgcn:
9811 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9812 case llvm::Triple::sparc:
9813 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
9814 case llvm::Triple::sparcv9:
9815 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
9816 case llvm::Triple::xcore:
9817 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
9818 case llvm::Triple::arc:
9819 return SetCGInfo(
new ARCTargetCodeGenInfo(Types));
9820 case llvm::Triple::spir:
9821 case llvm::Triple::spir64:
9822 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
9833 llvm::Function *Invoke,
9835 auto *InvokeFT = Invoke->getFunctionType();
9837 for (
auto &
P : InvokeFT->params())
9838 ArgTys.push_back(
P);
9840 std::string Name = Invoke->getName().str() +
"_kernel";
9841 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9844 auto IP = CGF.
Builder.saveIP();
9847 Builder.SetInsertPoint(BB);
9849 for (
auto &A : F->args())
9851 Builder.CreateCall(Invoke, Args);
9852 Builder.CreateRetVoid();
9853 Builder.restoreIP(IP);
9865 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9871 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9872 auto *InvokeFT = Invoke->getFunctionType();
9881 ArgTys.push_back(BlockTy);
9882 ArgTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9883 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9884 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9885 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9886 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9887 ArgNames.push_back(llvm::MDString::get(C,
"block_literal"));
9888 for (
unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9889 ArgTys.push_back(InvokeFT->getParamType(I));
9890 ArgTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9891 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9892 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9893 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9894 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9896 llvm::MDString::get(C, (Twine(
"local_arg") + Twine(I)).str()));
9898 std::string Name = Invoke->getName().str() +
"_kernel";
9899 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9902 F->addFnAttr(
"enqueued-block");
9903 auto IP = CGF.
Builder.saveIP();
9905 Builder.SetInsertPoint(BB);
9906 unsigned BlockAlign = CGF.
CGM.
getDataLayout().getPrefTypeAlignment(BlockTy);
9907 auto *BlockPtr = Builder.CreateAlloca(BlockTy,
nullptr);
9908 BlockPtr->setAlignment(BlockAlign);
9909 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9910 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9912 Args.push_back(Cast);
9913 for (
auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9915 Builder.CreateCall(Invoke, Args);
9916 Builder.CreateRetVoid();
9917 Builder.restoreIP(IP);
9919 F->setMetadata(
"kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9920 F->setMetadata(
"kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9921 F->setMetadata(
"kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9922 F->setMetadata(
"kernel_arg_base_type",
9923 llvm::MDNode::get(C, ArgBaseTypeNames));
9924 F->setMetadata(
"kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9926 F->setMetadata(
"kernel_arg_name", llvm::MDNode::get(C, ArgNames));
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Represents a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration type whose underlying ...
virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering, llvm::LLVMContext &Ctx) const
Get the syncscope used in LLVM IR.
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
ASTContext & getContext() const
Represents a variable declaration or definition.
Objects with "hidden" visibility are not seen by the dynamic linker.
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Address getAddress() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
Represents a member of a struct/union/class.
bool isReferenceType() const
CharUnits getTypeUnadjustedAlignInChars(QualType T) const
getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, in characters, before alignment adjustments.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static bool requiresAMDGPUDefaultVisibility(const Decl *D, llvm::GlobalValue *GV)
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
__DEVICE__ int max(int __a, int __b)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
Objects with "default" visibility are seen by the dynamic linker and act like normal objects...
virtual bool hasLegalHalfType() const
Determine whether _Float16 is supported on this target.
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
static bool requiresAMDGPUProtectedVisibility(const Decl *D, llvm::GlobalValue *GV)
const_arg_iterator arg_begin() const
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Scope - A scope is a transient data structure that is used while parsing the program.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isFloat128Type() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
unsigned getTypeUnadjustedAlign(QualType T) const
Return the ABI-specified natural alignment of a (complete) type T, before alignment adjustments...
constexpr XRayInstrMask All
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
ExtInfo withCallingConv(CallingConv cc) const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
llvm::CallingConv::ID RuntimeCC
static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
Objects with "protected" visibility are seen by the dynamic linker but always dynamically resolve to ...
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
const TargetInfo & getTarget() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type)
SyncScope
Defines synch scope values used internally by clang.
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ...
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
bool isMemberFunctionPointerType() const
llvm::LLVMContext & getLLVMContext()
bool canPassInRegisters() const
Determine whether this class can be passed in registers.
constexpr XRayInstrMask None
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific code generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
StringRef getName() const
Return the actual identifier string.
const TargetInfo & getTarget() const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
bool isFloat16Type() const
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA...
ExtInfo getExtInfo() const
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
virtual bool hasFloat16Type() const
Determine whether the _Float16 type is supported on this target.
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
virtual bool hasFloat128Type() const
Determine whether the __float128 type is supported on this target.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
virtual bool hasInt128Type() const
Determine whether the __int128 type is supported on this target.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
ASTContext & getContext() const
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
static ABIArgInfo getCoerceAndExpand(llvm::StructType *coerceToType, llvm::Type *unpaddedCoerceToType)
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
__DEVICE__ int min(int __a, int __b)
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isPointerType() const
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue references.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
const LangOptions & getLangOpts() const
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
const CodeGenOptions & getCodeGenOpts() const
const llvm::Triple & getTriple() const