25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringSwitch.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Type.h" 31 #include "llvm/Support/raw_ostream.h" 34 using namespace clang;
35 using namespace CodeGen;
53 llvm::LLVMContext &LLVMContext) {
57 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
58 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
68 for (
unsigned I = FirstIndex; I <= LastIndex; ++I) {
70 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
84 ByRef, Realign, Padding);
115 unsigned maxAllRegisters) {
116 unsigned intCount = 0, fpCount = 0;
118 if (
type->isPointerTy()) {
120 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
122 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
124 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
129 return (intCount + fpCount > maxAllRegisters);
134 unsigned numElts)
const {
164 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
165 !RT->getDecl()->canPassInRegisters()) {
178 if (UD->
hasAttr<TransparentUnionAttr>()) {
179 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
217 uint64_t Members)
const {
222 raw_ostream &OS = llvm::errs();
223 OS <<
"(ABIArgInfo Kind=";
226 OS <<
"Direct Type=";
239 OS <<
"InAlloca Offset=" << getInAllocaFieldIndex();
242 OS <<
"Indirect Align=" << getIndirectAlign().getQuantity()
243 <<
" ByVal=" << getIndirectByVal()
244 <<
" Realign=" << getIndirectRealign();
249 case CoerceAndExpand:
250 OS <<
"CoerceAndExpand Type=";
251 getCoerceAndExpandType()->print(OS);
264 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
266 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
268 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
270 Ptr->getName() +
".aligned");
294 bool AllowHigherAlign) {
304 if (AllowHigherAlign && DirectAlign > SlotSize) {
321 !DirectTy->isStructTy()) {
344 std::pair<CharUnits, CharUnits> ValueInfo,
346 bool AllowHigherAlign) {
353 DirectSize = ValueInfo.first;
354 DirectAlign = ValueInfo.second;
360 DirectTy = DirectTy->getPointerTo(0);
363 DirectSize, DirectAlign,
376 Address Addr1, llvm::BasicBlock *Block1,
377 Address Addr2, llvm::BasicBlock *Block2,
378 const llvm::Twine &Name =
"") {
380 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(Addr1.
getType(), 2, Name);
431 return llvm::CallingConv::SPIR_KERNEL;
435 llvm::PointerType *T,
QualType QT)
const {
436 return llvm::ConstantPointerNull::get(T);
443 "Address space agnostic languages only");
452 if (
auto *C = dyn_cast<llvm::Constant>(Src))
453 return performAddrSpaceCast(CGF.
CGM, C, SrcAddr, DestAddr, DestTy);
463 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
468 return C.getOrInsertSyncScopeID(
"");
486 if (AT->getSize() == 0)
488 FT = AT->getElementType();
499 if (isa<CXXRecordDecl>(RT->
getDecl()))
517 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
518 for (
const auto &I : CXXRD->bases())
522 for (
const auto *I : RD->
fields())
545 const Type *Found =
nullptr;
548 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
549 for (
const auto &I : CXXRD->bases()) {
567 for (
const auto *FD : RD->
fields()) {
581 if (AT->getSize().getZExtValue() != 1)
583 FT = AT->getElementType();
619 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
622 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
631 return Address(Addr, TyAlignForABI);
634 "Unexpected ArgInfo Kind in generic VAArg emitter!");
637 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
639 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
641 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
643 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
656 class DefaultABIInfo :
public ABIInfo {
667 I.info = classifyArgumentType(I.type);
696 Ty = EnumTy->getDecl()->getIntegerType();
711 RetTy = EnumTy->getDecl()->getIntegerType();
723 class WebAssemblyABIInfo final :
public DefaultABIInfo {
726 : DefaultABIInfo(CGT) {}
739 Arg.info = classifyArgumentType(Arg.type);
751 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
753 if (
auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
754 llvm::Function *Fn = cast<llvm::Function>(GV);
755 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
756 Fn->addFnAttr(
"no-prototype");
819 class PNaClABIInfo :
public ABIInfo {
864 Ty = EnumTy->getDecl()->getIntegerType();
884 RetTy = EnumTy->getDecl()->getIntegerType();
893 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
894 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
895 IRType->getScalarSizeInBits() != 64;
899 StringRef Constraint,
901 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
902 .Cases(
"y",
"&y",
"^Ym",
true)
904 if (IsMMXCons && Ty->isVectorTy()) {
905 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
921 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
922 if (BT->getKind() == BuiltinType::LongDouble) {
924 &llvm::APFloat::x87DoubleExtended())
933 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
/// Returns true if an aggregate/HVA with \p NumMembers elements is small
/// enough to be passed in registers under the vectorcall convention.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  // vectorcall allows homogeneous aggregates of at most four elements.
  constexpr uint64_t MaxVectorCallMembers = 4;
  return NumMembers <= MaxVectorCallMembers;
}
959 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
963 unsigned FreeSSERegs;
968 VectorcallMaxParamNumAsReg = 6
978 static const unsigned MinABIStackAlignInBytes = 4;
980 bool IsDarwinVectorABI;
981 bool IsRetSmallStructInRegABI;
982 bool IsWin32StructABI;
985 unsigned DefaultNumRegisterParameters;
/// Returns true when \p Size (in bits) is one of the widths that fits a
/// native x86 register (8/16/32/64).
static bool isRegisterSize(unsigned Size) {
  switch (Size) {
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}
993 return isX86VectorTypeForVectorCall(
getContext(), Ty);
997 uint64_t NumMembers)
const override {
999 return isX86VectorCallAggregateSmallEnough(NumMembers);
1011 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
1019 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
1021 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
1022 bool &NeedsPadding)
const;
1023 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
1025 bool canExpandIndirectArgument(
QualType Ty)
const;
1035 bool &UsedInAlloca)
const;
1044 bool RetSmallStructInRegABI,
bool Win32StructABI,
1045 unsigned NumRegisterParameters,
bool SoftFloatABI)
1046 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1047 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1048 IsWin32StructABI(Win32StructABI),
1049 IsSoftFloatABI(SoftFloatABI),
1051 DefaultNumRegisterParameters(NumRegisterParameters) {}
1054 bool asReturnValue)
const override {
1062 bool isSwiftErrorInRegister()
const override {
1071 bool RetSmallStructInRegABI,
bool Win32StructABI,
1072 unsigned NumRegisterParameters,
bool SoftFloatABI)
1074 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1075 NumRegisterParameters, SoftFloatABI)) {}
1077 static bool isStructReturnInRegABI(
1080 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1093 StringRef Constraint,
1095 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1099 std::string &Constraints,
1100 std::vector<llvm::Type *> &ResultRegTypes,
1101 std::vector<llvm::Type *> &ResultTruncRegTypes,
1102 std::vector<LValue> &ResultRegDests,
1103 std::string &AsmString,
1104 unsigned NumOutputs)
const override;
1108 unsigned Sig = (0xeb << 0) |
1112 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1115 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
1116 return "movl\t%ebp, %ebp" 1117 "\t\t// marker for objc_retainAutoreleaseReturnValue";
// NOTE(review): fragment of rewriteInputConstraintReferences(FirstIn,
// NumNewOuts, AsmString). It rewrites "$N" operand references inside an
// inline-asm template so that indices at or past the first input are shifted
// by the number of newly inserted output operands. Several interior lines
// (buffer declaration, position updates, the else branch, closing braces)
// are elided in this extraction.
1132 unsigned NumNewOuts,
1133 std::string &AsmString) {
// The rewritten string is accumulated through a raw_string_ostream.
1135 llvm::raw_string_ostream OS(Buf);
// Scan the template left to right, copying text verbatim as we go.
1137 while (Pos < AsmString.size()) {
1138 size_t DollarStart = AsmString.find(
'$', Pos);
1139 if (DollarStart == std::string::npos)
1140 DollarStart = AsmString.size();
// Find the end of the run of consecutive '$' characters.
1141 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1142 if (DollarEnd == std::string::npos)
1143 DollarEnd = AsmString.size();
// Copy everything up to and including the dollar run unchanged.
1144 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1146 size_t NumDollars = DollarEnd - DollarStart;
// An odd-length '$' run means the final '$' introduces an operand
// reference (even-length runs pair up as escapes); parse its digits.
1147 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1149 size_t DigitStart = Pos;
1150 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1151 if (DigitEnd == std::string::npos)
1152 DigitEnd = AsmString.size();
1153 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1154 unsigned OperandIndex;
// If the digits parse as a base-10 operand index, shift any index that
// refers to an input (>= FirstIn) past the newly added outputs.
1155 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1156 if (OperandIndex >= FirstIn)
1157 OperandIndex += NumNewOuts;
// Finally replace the original template with the rewritten buffer.
1165 AsmString = std::move(OS.str());
1169 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1171 std::vector<llvm::Type *> &ResultRegTypes,
1172 std::vector<llvm::Type *> &ResultTruncRegTypes,
1173 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1174 unsigned NumOutputs)
const {
1179 if (!Constraints.empty())
1181 if (RetWidth <= 32) {
1182 Constraints +=
"={eax}";
1183 ResultRegTypes.push_back(CGF.
Int32Ty);
1186 Constraints +=
"=A";
1187 ResultRegTypes.push_back(CGF.
Int64Ty);
1192 ResultTruncRegTypes.push_back(CoerceTy);
1196 CoerceTy->getPointerTo()));
1197 ResultRegDests.push_back(ReturnSlot);
1204 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1210 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1216 if (Size == 64 || Size == 128)
1231 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1235 if (!RT)
return false;
1247 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1256 Ty = CTy->getElementType();
1266 return Size == 32 || Size == 64;
1271 for (
const auto *FD : RD->
fields()) {
1281 if (FD->isBitField())
1306 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1313 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1314 if (!IsWin32StructABI) {
1317 if (!CXXRD->isCLike())
1321 if (CXXRD->isDynamicClass())
1338 if (State.FreeRegs) {
1347 CCState &State)
const {
1352 uint64_t NumElts = 0;
1353 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1354 State.CC == llvm::CallingConv::X86_RegCall) &&
1362 if (IsDarwinVectorABI) {
1374 if ((Size == 8 || Size == 16 || Size == 32) ||
1375 (Size == 64 && VT->getNumElements() == 1))
1379 return getIndirectReturnResult(RetTy, State);
1388 if (RT->getDecl()->hasFlexibleArrayMember())
1389 return getIndirectReturnResult(RetTy, State);
1394 return getIndirectReturnResult(RetTy, State);
1402 if (shouldReturnTypeInRegister(RetTy,
getContext())) {
1411 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1412 || SeltTy->hasPointerRepresentation())
1420 return getIndirectReturnResult(RetTy, State);
1425 RetTy = EnumTy->getDecl()->getIntegerType();
1442 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1443 for (
const auto &I : CXXRD->bases())
1447 for (
const auto *i : RD->
fields()) {
1460 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1461 unsigned Align)
const {
1464 if (Align <= MinABIStackAlignInBytes)
1468 if (!IsDarwinVectorABI) {
1470 return MinABIStackAlignInBytes;
1478 return MinABIStackAlignInBytes;
1482 CCState &State)
const {
1484 if (State.FreeRegs) {
1494 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1495 if (StackAlign == 0)
1500 bool Realign = TypeAlign > StackAlign;
1505 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1512 if (K == BuiltinType::Float || K == BuiltinType::Double)
1518 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1519 if (!IsSoftFloatABI) {
1520 Class C = classify(Ty);
1526 unsigned SizeInRegs = (Size + 31) / 32;
1528 if (SizeInRegs == 0)
1532 if (SizeInRegs > State.FreeRegs) {
1541 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1545 State.FreeRegs -= SizeInRegs;
1549 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1551 bool &NeedsPadding)
const {
1558 NeedsPadding =
false;
1561 if (!updateFreeRegs(Ty, State))
1567 if (State.CC == llvm::CallingConv::X86_FastCall ||
1568 State.CC == llvm::CallingConv::X86_VectorCall ||
1569 State.CC == llvm::CallingConv::X86_RegCall) {
1570 if (
getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1571 NeedsPadding =
true;
1579 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1580 if (!updateFreeRegs(Ty, State))
1586 if (State.CC == llvm::CallingConv::X86_FastCall ||
1587 State.CC == llvm::CallingConv::X86_VectorCall ||
1588 State.CC == llvm::CallingConv::X86_RegCall) {
1600 CCState &State)
const {
1610 return getIndirectResult(Ty,
false, State);
1620 uint64_t NumElts = 0;
1621 if (State.CC == llvm::CallingConv::X86_RegCall &&
1624 if (State.FreeSSERegs >= NumElts) {
1625 State.FreeSSERegs -= NumElts;
1630 return getIndirectResult(Ty,
false, State);
1637 return getIndirectResult(Ty,
true, State);
1644 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1645 bool NeedsPadding =
false;
1647 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1650 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1656 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1664 if (
getContext().getTypeSize(Ty) <= 4 * 32 &&
1665 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1667 State.CC == llvm::CallingConv::X86_FastCall ||
1668 State.CC == llvm::CallingConv::X86_VectorCall ||
1669 State.CC == llvm::CallingConv::X86_RegCall,
1672 return getIndirectResult(Ty,
true, State);
1678 if (IsDarwinVectorABI) {
1680 if ((Size == 8 || Size == 16 || Size == 32) ||
1681 (Size == 64 && VT->getNumElements() == 1))
1694 Ty = EnumTy->getDecl()->getIntegerType();
1696 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1709 void X86_32ABIInfo::computeVectorCallArgs(
CGFunctionInfo &FI, CCState &State,
1710 bool &UsedInAlloca)
const {
1724 uint64_t NumElts = 0;
1728 if (State.FreeSSERegs >= NumElts) {
1729 State.FreeSSERegs -= NumElts;
1741 uint64_t NumElts = 0;
1747 if (State.FreeSSERegs >= NumElts) {
1748 State.FreeSSERegs -= NumElts;
1749 I.info = getDirectX86Hva();
1751 I.info = getIndirectResult(Ty,
false, State);
1753 }
else if (!IsHva) {
1765 else if (State.CC == llvm::CallingConv::X86_FastCall)
1767 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1769 State.FreeSSERegs = 6;
1772 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1774 State.FreeSSERegs = 8;
1776 State.FreeRegs = DefaultNumRegisterParameters;
1783 if (State.FreeRegs) {
1794 bool UsedInAlloca =
false;
1795 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1796 computeVectorCallArgs(FI, State, UsedInAlloca);
1808 rewriteWithInAlloca(FI);
1818 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1825 StackOffset = FieldEnd.
alignTo(FieldAlign);
1826 if (StackOffset != FieldEnd) {
1827 CharUnits NumBytes = StackOffset - FieldEnd;
1829 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1830 FrameFields.push_back(Ty);
1855 llvm_unreachable(
"invalid enum");
1858 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1859 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1876 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1883 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1893 for (; I != E; ++I) {
1895 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1913 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1920 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1922 assert(Triple.getArch() == llvm::Triple::x86);
1924 switch (Opts.getStructReturnConvention()) {
1933 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1936 switch (Triple.getOS()) {
1937 case llvm::Triple::DragonFly:
1938 case llvm::Triple::FreeBSD:
1939 case llvm::Triple::OpenBSD:
1940 case llvm::Triple::Win32:
1947 void X86_32TargetCodeGenInfo::setTargetAttributes(
1949 if (GV->isDeclaration())
1951 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1952 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1953 llvm::Function *Fn = cast<llvm::Function>(GV);
1954 Fn->addFnAttr(
"stackrealign");
1956 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1957 llvm::Function *Fn = cast<llvm::Function>(GV);
1958 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1963 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1986 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
// Native vector-register width in bits for the given AVX ABI level.
// NOTE(review): the per-case return statements (and the None case) are
// elided in this extraction; only the case labels and the unreachable
// fallback survive.
2013 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
2015 case X86AVXABILevel::AVX512:
2017 case X86AVXABILevel::AVX:
// All enumerators must be handled above; reaching here is a logic error.
2022 llvm_unreachable(
"Unknown AVXLevel");
2047 static Class merge(Class Accum, Class Field);
2063 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
2089 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2090 bool isNamedArg)
const;
2094 unsigned IROffset,
QualType SourceTy,
2095 unsigned SourceOffset)
const;
2097 unsigned IROffset,
QualType SourceTy,
2098 unsigned SourceOffset)
const;
2114 unsigned &neededInt,
unsigned &neededSSE,
2115 bool isNamedArg)
const;
2118 unsigned &NeededSSE)
const;
2121 unsigned &NeededSSE)
const;
2123 bool IsIllegalVectorType(
QualType Ty)
const;
2130 bool honorsRevision0_98()
const {
2136 bool classifyIntegerMMXAsSSE()
const {
2138 if (
getContext().getLangOpts().getClangABICompat() <=
2143 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2145 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2153 bool Has64BitPointers;
2158 Has64BitPointers(CGT.
getDataLayout().getPointerSize(0) == 8) {
2162 unsigned neededInt, neededSSE;
2168 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2169 return (vectorTy->getBitWidth() > 128);
2181 bool has64BitPointers()
const {
2182 return Has64BitPointers;
2186 bool asReturnValue)
const override {
2189 bool isSwiftErrorInRegister()
const override {
2199 IsMingw64(
getTarget().getTriple().isWindowsGNUEnvironment()) {}
2208 return isX86VectorTypeForVectorCall(
getContext(), Ty);
2212 uint64_t NumMembers)
const override {
2214 return isX86VectorCallAggregateSmallEnough(NumMembers);
2218 bool asReturnValue)
const override {
2222 bool isSwiftErrorInRegister()
const override {
2228 bool IsVectorCall,
bool IsRegCall)
const;
2231 void computeVectorCallArgs(
CGFunctionInfo &FI,
unsigned FreeSSERegs,
2232 bool IsVectorCall,
bool IsRegCall)
const;
2242 const X86_64ABIInfo &getABIInfo()
const {
2261 StringRef Constraint,
2263 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2266 bool isNoProtoCallVariadic(
const CallArgList &args,
2275 bool HasAVXType =
false;
2276 for (CallArgList::const_iterator
2277 it = args.begin(), ie = args.end(); it != ie; ++it) {
2278 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2293 unsigned Sig = (0xeb << 0) |
2297 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2300 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2302 if (GV->isDeclaration())
2304 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2305 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2306 llvm::Function *Fn = cast<llvm::Function>(GV);
2307 Fn->addFnAttr(
"stackrealign");
2309 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2310 llvm::Function *Fn = cast<llvm::Function>(GV);
2311 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2317 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
2320 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2322 void getDependentLibraryOption(llvm::StringRef Lib,
2326 if (Lib.find(
" ") != StringRef::npos)
2327 Opt +=
"\"" + Lib.str() +
"\"";
// Formats a library name for a Windows linker directive (/DEFAULTLIB etc.):
// quotes names containing spaces and supplies a ".lib" extension when the
// name lacks one. NOTE(review): the lines that append Lib itself, append the
// extension, and return the result are elided in this extraction.
2333 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
// Names containing a space must be wrapped in double quotes.
2337 bool Quote = (Lib.find(
" ") != StringRef::npos);
2338 std::string ArgStr = Quote ?
"\"" :
"";
// Case-insensitive check for an existing ".lib" suffix.
2340 if (!Lib.endswith_lower(
".lib"))
// Close the quote if one was opened above.
2342 ArgStr += Quote ?
"\"" :
"";
2346 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2349 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2350 unsigned NumRegisterParameters)
2351 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2352 Win32StructABI, NumRegisterParameters,
false) {}
2354 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2357 void getDependentLibraryOption(llvm::StringRef Lib,
2359 Opt =
"/DEFAULTLIB:";
2360 Opt += qualifyWindowsLibrary(Lib);
2363 void getDetectMismatchOption(llvm::StringRef Name,
2364 llvm::StringRef
Value,
2366 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2370 static void addStackProbeTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2372 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2375 Fn->addFnAttr(
"stack-probe-size",
2378 Fn->addFnAttr(
"no-stack-arg-probe");
2382 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2384 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2385 if (GV->isDeclaration())
2387 addStackProbeTargetAttributes(D, GV, CGM);
2396 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2413 void getDependentLibraryOption(llvm::StringRef Lib,
2415 Opt =
"/DEFAULTLIB:";
2416 Opt += qualifyWindowsLibrary(Lib);
2419 void getDetectMismatchOption(llvm::StringRef Name,
2420 llvm::StringRef
Value,
2422 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2426 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2429 if (GV->isDeclaration())
2431 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2432 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2433 llvm::Function *Fn = cast<llvm::Function>(GV);
2434 Fn->addFnAttr(
"stackrealign");
2436 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2437 llvm::Function *Fn = cast<llvm::Function>(GV);
2438 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2442 addStackProbeTargetAttributes(D, GV, CGM);
// NOTE(review): fragment of X86_64ABIInfo::postMerge — the cleanup pass run
// after per-eightbyte classification merging. The action taken under each
// condition is elided in this extraction; the comments below describe only
// the visible conditions, which mirror the SysV x86-64 psABI post-merger
// rules — confirm against the full source.
2446 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
// X87Up not preceded by X87 (when honoring ABI revision 0.98).
2471 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
// Aggregate larger than two eightbytes that is not one SSE/SSEUp vector.
2473 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
// SSEUp whose preceding eightbyte is not SSE.
2475 if (Hi == SSEUp && Lo != SSE)
// Merges the classification of one field into the accumulated classification
// of its enclosing eightbyte (SysV x86-64 psABI merge step).
// NOTE(review): the return statements following each condition are elided in
// this extraction; comments describe only the visible conditions.
2479 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// The accumulator is never already Memory or ComplexX87 here — classification
// gives up as soon as Memory appears, and ComplexX87 only occurs alone.
2503 assert((Accum != Memory && Accum != ComplexX87) &&
2504 "Invalid accumulated classification during merge.");
// Identical classes, or a field that contributes nothing (NoClass).
2505 if (Accum == Field || Field == NoClass)
// Any Memory field dominates the whole eightbyte.
2507 if (Field == Memory)
// An empty accumulator adopts the field's class.
2509 if (Accum == NoClass)
// Any x87-family class mixed with anything else.
2513 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2514 Accum == X87 || Accum == X87Up)
bool isNamedArg)
const {
2531 Class &Current = OffsetBase < 64 ? Lo : Hi;
2537 if (k == BuiltinType::Void) {
2539 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2542 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2544 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2546 }
else if (k == BuiltinType::LongDouble) {
2548 if (LDF == &llvm::APFloat::IEEEquad()) {
2551 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2554 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
2557 llvm_unreachable(
"unexpected long double representation!");
2566 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2577 if (Has64BitPointers) {
2584 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2585 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2586 if (EB_FuncPtr != EB_ThisAdj) {
2600 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2609 uint64_t EB_Lo = (OffsetBase) / 64;
2610 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2613 }
else if (Size == 64) {
2614 QualType ElementType = VT->getElementType();
2623 if (!classifyIntegerMMXAsSSE() &&
2634 if (OffsetBase && OffsetBase != 64)
2636 }
else if (Size == 128 ||
2637 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2665 else if (Size <= 128)
2673 if (LDF == &llvm::APFloat::IEEEquad())
2675 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2676 Current = ComplexX87;
2677 else if (LDF == &llvm::APFloat::IEEEdouble())
2680 llvm_unreachable(
"unexpected long double representation!");
2685 uint64_t EB_Real = (OffsetBase) / 64;
2687 if (Hi == NoClass && EB_Real != EB_Imag)
2707 if (OffsetBase %
getContext().getTypeAlign(AT->getElementType()))
2714 uint64_t ArraySize = AT->getSize().getZExtValue();
2721 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2724 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2725 Class FieldLo, FieldHi;
2726 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2727 Lo = merge(Lo, FieldLo);
2728 Hi = merge(Hi, FieldHi);
2729 if (Lo == Memory || Hi == Memory)
2733 postMerge(Size, Lo, Hi);
2734 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2764 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2765 for (
const auto &I : CXXRD->bases()) {
2766 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2767 "Unexpected base class!");
2769 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2776 Class FieldLo, FieldHi;
2779 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2780 Lo = merge(Lo, FieldLo);
2781 Hi = merge(Hi, FieldHi);
2782 if (Lo == Memory || Hi == Memory) {
2783 postMerge(Size, Lo, Hi);
2792 i != e; ++i, ++idx) {
2794 bool BitField = i->isBitField();
2797 if (BitField && i->isUnnamedBitfield())
2807 if (Size > 128 && (Size !=
getContext().getTypeSize(i->getType()) ||
2808 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2810 postMerge(Size, Lo, Hi);
2814 if (!BitField && Offset %
getContext().getTypeAlign(i->getType())) {
2816 postMerge(Size, Lo, Hi);
2826 Class FieldLo, FieldHi;
2832 assert(!i->isUnnamedBitfield());
2834 uint64_t Size = i->getBitWidthValue(
getContext());
2836 uint64_t EB_Lo = Offset / 64;
2837 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2840 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2845 FieldHi = EB_Hi ?
Integer : NoClass;
2848 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2849 Lo = merge(Lo, FieldLo);
2850 Hi = merge(Hi, FieldHi);
2851 if (Lo == Memory || Hi == Memory)
2855 postMerge(Size, Lo, Hi);
2865 Ty = EnumTy->getDecl()->getIntegerType();
2874 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2877 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2878 if (Size <= 64 || Size > LargestVector)
2886 unsigned freeIntRegs)
const {
2898 Ty = EnumTy->getDecl()->getIntegerType();
2932 if (freeIntRegs == 0) {
2937 if (Align == 8 && Size <= 64)
2954 if (isa<llvm::VectorType>(IRType) ||
2955 IRType->getTypeID() == llvm::Type::FP128TyID)
2960 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2963 return llvm::VectorType::get(llvm::Type::getDoubleTy(
getVMContext()),
2979 unsigned TySize = (unsigned)Context.
getTypeSize(Ty);
2980 if (TySize <= StartBit)
2984 unsigned EltSize = (unsigned)Context.
getTypeSize(AT->getElementType());
2985 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2988 for (
unsigned i = 0; i != NumElts; ++i) {
2990 unsigned EltOffset = i*EltSize;
2991 if (EltOffset >= EndBit)
break;
2993 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2995 EndBit-EltOffset, Context))
3007 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3008 for (
const auto &I : CXXRD->bases()) {
3009 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3010 "Unexpected base class!");
3012 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
3016 if (BaseOffset >= EndBit)
continue;
3018 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3020 EndBit-BaseOffset, Context))
3031 i != e; ++i, ++idx) {
3035 if (FieldOffset >= EndBit)
break;
3037 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3056 const llvm::DataLayout &TD) {
3058 if (IROffset == 0 && IRType->isFloatTy())
3062 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3063 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3064 unsigned Elt = SL->getElementContainingOffset(IROffset);
3065 IROffset -= SL->getElementOffset(Elt);
3070 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3072 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3073 IROffset -= IROffset/EltSize*EltSize;
3084 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3085 QualType SourceTy,
unsigned SourceOffset)
const {
3098 return llvm::VectorType::get(llvm::Type::getFloatTy(
getVMContext()), 2);
3119 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3120 QualType SourceTy,
unsigned SourceOffset)
const {
3123 if (IROffset == 0) {
3125 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3126 IRType->isIntegerTy(64))
3135 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3136 IRType->isIntegerTy(32) ||
3137 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3138 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3139 cast<llvm::IntegerType>(IRType)->getBitWidth();
3147 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3149 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(STy);
3150 if (IROffset < SL->getSizeInBytes()) {
3151 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3152 IROffset -= SL->getElementOffset(FieldIdx);
3154 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3155 SourceTy, SourceOffset);
3159 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3162 unsigned EltOffset = IROffset/EltSize*EltSize;
3163 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3169 unsigned TySizeInBytes =
3172 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
3177 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3188 const llvm::DataLayout &TD) {
3193 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3194 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3195 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3196 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
3208 if (Lo->isFloatTy())
3209 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3211 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3212 &&
"Invalid/unknown lo type");
3213 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3217 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3220 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3221 "Invalid x86-64 argument pair!");
3229 X86_64ABIInfo::Class Lo, Hi;
3230 classify(RetTy, 0, Lo, Hi,
true);
3233 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3234 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3243 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3244 "Unknown missing lo part");
3249 llvm_unreachable(
"Invalid classification for lo word.");
3254 return getIndirectReturnResult(RetTy);
3259 ResType = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3263 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3266 RetTy = EnumTy->getDecl()->getIntegerType();
3277 ResType = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3290 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3291 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(
getVMContext()),
3302 llvm_unreachable(
"Invalid classification for hi word.");
3309 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3314 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3325 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3326 ResType = GetByteVectorType(RetTy);
3337 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3354 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3360 X86_64ABIInfo::Class Lo, Hi;
3361 classify(Ty, 0, Lo, Hi, isNamedArg);
3365 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3366 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3377 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3378 "Unknown missing lo part");
3391 return getIndirectResult(Ty, freeIntRegs);
3395 llvm_unreachable(
"Invalid classification for lo word.");
3408 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3411 Ty = EnumTy->getDecl()->getIntegerType();
3425 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3439 llvm_unreachable(
"Invalid classification for hi word.");
3441 case NoClass:
break;
3446 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(Ty), 8, Ty, 8);
3468 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3469 ResType = GetByteVectorType(Ty);
3483 X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
3484 unsigned &NeededSSE)
const {
3486 assert(RT &&
"classifyRegCallStructType only valid with struct types");
3488 if (RT->getDecl()->hasFlexibleArrayMember())
3489 return getIndirectReturnResult(Ty);
3492 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3493 if (CXXRD->isDynamicClass()) {
3494 NeededInt = NeededSSE = 0;
3495 return getIndirectReturnResult(Ty);
3498 for (
const auto &I : CXXRD->bases())
3499 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3501 NeededInt = NeededSSE = 0;
3502 return getIndirectReturnResult(Ty);
3507 for (
const auto *FD : RT->getDecl()->fields()) {
3508 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3509 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3511 NeededInt = NeededSSE = 0;
3512 return getIndirectReturnResult(Ty);
3515 unsigned LocalNeededInt, LocalNeededSSE;
3517 LocalNeededSSE,
true)
3519 NeededInt = NeededSSE = 0;
3520 return getIndirectReturnResult(Ty);
3522 NeededInt += LocalNeededInt;
3523 NeededSSE += LocalNeededSSE;
3531 unsigned &NeededInt,
3532 unsigned &NeededSSE)
const {
3537 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3546 if (CallingConv == llvm::CallingConv::Win64) {
3547 WinX86_64ABIInfo Win64ABIInfo(
CGT);
3548 Win64ABIInfo.computeInfo(FI);
3552 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3555 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3556 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3557 unsigned NeededInt, NeededSSE;
3563 classifyRegCallStructType(FI.
getReturnType(), NeededInt, NeededSSE);
3564 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3565 FreeIntRegs -= NeededInt;
3566 FreeSSERegs -= NeededSSE;
3595 it != ie; ++it, ++ArgNo) {
3596 bool IsNamedArg = ArgNo < NumRequiredArgs;
3598 if (IsRegCall && it->type->isStructureOrClassType())
3599 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3602 NeededSSE, IsNamedArg);
3608 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3609 FreeIntRegs -= NeededInt;
3610 FreeSSERegs -= NeededSSE;
3612 it->info = getIndirectResult(it->type, FreeIntRegs);
3638 llvm::PointerType::getUnqual(LTy));
3647 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3648 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3649 "overflow_arg_area.next");
3665 unsigned neededInt, neededSSE;
3673 if (!neededInt && !neededSSE)
3689 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3695 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3696 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3705 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3706 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3707 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3713 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3735 if (neededInt && neededSSE) {
3737 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3741 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3744 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3745 "Unexpected ABI info for mixed regs");
3746 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3747 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3750 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3751 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3770 }
else if (neededInt) {
3776 std::pair<CharUnits, CharUnits> SizeAlign =
3778 uint64_t TySize = SizeAlign.first.getQuantity();
3789 }
else if (neededSSE == 1) {
3794 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3813 RegAddrLo, ST->getStructElementType(0)));
3817 RegAddrHi, ST->getStructElementType(1)));
3861 WinX86_64ABIInfo::reclassifyHvaArgType(
QualType Ty,
unsigned &FreeSSERegs,
3864 const Type *
Base =
nullptr;
3865 uint64_t NumElts = 0;
3869 FreeSSERegs -= NumElts;
3870 return getDirectX86Hva();
3876 bool IsReturnType,
bool IsVectorCall,
3877 bool IsRegCall)
const {
3883 Ty = EnumTy->getDecl()->getIntegerType();
3886 uint64_t Width = Info.
Width;
3891 if (!IsReturnType) {
3901 const Type *
Base =
nullptr;
3902 uint64_t NumElts = 0;
3905 if ((IsVectorCall || IsRegCall) &&
3908 if (FreeSSERegs >= NumElts) {
3909 FreeSSERegs -= NumElts;
3915 }
else if (IsVectorCall) {
3916 if (FreeSSERegs >= NumElts &&
3918 FreeSSERegs -= NumElts;
3920 }
else if (IsReturnType) {
3933 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3940 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3950 if (BT && BT->
getKind() == BuiltinType::Bool)
3955 if (IsMingw64 && BT && BT->
getKind() == BuiltinType::LongDouble) {
3957 if (LDF == &llvm::APFloat::x87DoubleExtended())
3965 unsigned FreeSSERegs,
3967 bool IsRegCall)
const {
3972 if (Count < VectorcallMaxParamNumAsReg)
3973 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
3977 unsigned ZeroSSERegsAvail = 0;
3978 I.info = classify(I.type, ZeroSSERegsAvail,
false,
3979 IsVectorCall, IsRegCall);
3985 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
3994 unsigned FreeSSERegs = 0;
3998 }
else if (IsRegCall) {
4005 IsVectorCall, IsRegCall);
4010 }
else if (IsRegCall) {
4016 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4019 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4027 bool IsIndirect =
false;
4033 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4045 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4046 bool IsSoftFloatABI;
4052 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4076 Ty = CTy->getElementType();
4084 const Type *AlignTy =
nullptr;
4101 if (
getTarget().getTriple().isOSDarwin()) {
4103 TI.second = getParamTypeAlignment(Ty);
4111 const unsigned OverflowLimit = 8;
4139 if (isInt || IsSoftFloatABI) {
4148 if (isI64 || (isF64 && IsSoftFloatABI)) {
4149 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4150 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4154 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
4160 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4163 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4177 if (!(isInt || IsSoftFloatABI)) {
4186 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
4194 Builder.CreateAdd(NumRegs,
4195 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4206 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4214 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
4225 if (Align > OverflowAreaAlign) {
4235 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4262 llvm::IntegerType *i8 = CGF.
Int8Ty;
4263 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4264 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4265 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4307 static const unsigned GPRBits = 64;
4310 bool IsSoftFloatABI;
4314 bool IsQPXVectorTy(
const Type *Ty)
const {
4319 unsigned NumElements = VT->getNumElements();
4320 if (NumElements == 1)
4323 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4326 }
else if (VT->getElementType()->
4327 isSpecificBuiltinType(BuiltinType::Float)) {
4336 bool IsQPXVectorTy(
QualType Ty)
const {
4344 IsSoftFloatABI(SoftFloatABI) {}
4346 bool isPromotableTypeForABI(
QualType Ty)
const;
4354 uint64_t Members)
const override;
4372 if (IsQPXVectorTy(T) ||
4388 bool asReturnValue)
const override {
4392 bool isSwiftErrorInRegister()
const override {
4401 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX,
4415 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
4417 PPC64TargetCodeGenInfo(
CodeGenTypes &
CGT) : DefaultTargetCodeGenInfo(CGT) {}
4433 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4436 Ty = EnumTy->getDecl()->getIntegerType();
4445 switch (BT->getKind()) {
4446 case BuiltinType::Int:
4447 case BuiltinType::UInt:
4461 Ty = CTy->getElementType();
4465 if (IsQPXVectorTy(Ty)) {
4476 const Type *AlignAsType =
nullptr;
4480 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4483 AlignAsType = EltType;
4487 const Type *
Base =
nullptr;
4488 uint64_t Members = 0;
4489 if (!AlignAsType &&
Kind == ELFv2 &&
4494 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4499 }
else if (AlignAsType) {
4518 uint64_t &Members)
const {
4520 uint64_t NElements = AT->getSize().getZExtValue();
4525 Members *= NElements;
4534 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4535 for (
const auto &I : CXXRD->bases()) {
4540 uint64_t FldMembers;
4544 Members += FldMembers;
4548 for (
const auto *FD : RD->
fields()) {
4553 if (AT->getSize().getZExtValue() == 0)
4555 FT = AT->getElementType();
4565 uint64_t FldMembers;
4570 std::max(Members, FldMembers) : Members + FldMembers);
4584 Ty = CT->getElementType();
4600 QualType EltTy = VT->getElementType();
4601 unsigned NumElements =
4616 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4620 if (BT->getKind() == BuiltinType::Float ||
4621 BT->getKind() == BuiltinType::Double ||
4622 BT->getKind() == BuiltinType::LongDouble ||
4624 (BT->getKind() == BuiltinType::Float128))) {
4637 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4638 const Type *
Base, uint64_t Members)
const {
4648 return Members * NumRegs <= 8;
4664 else if (Size < 128) {
4674 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4678 const Type *Base =
nullptr;
4679 uint64_t Members = 0;
4680 if (
Kind == ELFv2 &&
4683 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4692 if (Bits > 0 && Bits <= 8 * GPRBits) {
4697 if (Bits <= GPRBits)
4699 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4703 uint64_t RegBits = ABIAlign * 8;
4704 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4706 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4715 TyAlign > ABIAlign);
4736 else if (Size < 128) {
4744 const Type *Base =
nullptr;
4745 uint64_t Members = 0;
4746 if (
Kind == ELFv2 &&
4749 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4755 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4760 if (Bits > GPRBits) {
4761 CoerceTy = llvm::IntegerType::get(
getVMContext(), GPRBits);
4762 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4765 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4781 TypeInfo.second = getParamTypeAlignment(Ty);
4793 if (EltSize < SlotSize) {
4795 SlotSize * 2, SlotSize,
4802 SlotSize - EltSize);
4804 2 * SlotSize - EltSize);
4835 llvm::IntegerType *i8 = CGF.
Int8Ty;
4836 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4837 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4838 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4875 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4911 ABIKind getABIKind()
const {
return Kind; }
4912 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4918 uint64_t Members)
const override;
4920 bool isIllegalVectorType(
QualType Ty)
const;
4927 it.info = classifyArgumentType(it.type);
4938 return Kind == Win64 ?
EmitMSVAArg(CGF, VAListAddr, Ty)
4939 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4940 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4947 bool asReturnValue)
const override {
4950 bool isSwiftErrorInRegister()
const override {
4955 unsigned elts)
const override;
4963 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4964 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
4971 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
4974 class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
4976 WindowsAArch64TargetCodeGenInfo(
CodeGenTypes &
CGT, AArch64ABIInfo::ABIKind K)
4977 : AArch64TargetCodeGenInfo(CGT, K) {}
4979 void getDependentLibraryOption(llvm::StringRef Lib,
4981 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
4984 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
4986 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
4995 if (isIllegalVectorType(Ty)) {
5008 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 2);
5013 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 4);
5022 Ty = EnumTy->getDecl()->getIntegerType();
5040 if (IsEmpty || Size == 0) {
5046 if (IsEmpty && Size == 0)
5052 const Type *Base =
nullptr;
5053 uint64_t Members = 0;
5063 if (
getTarget().isRenderScriptTarget()) {
5067 if (
Kind == AArch64ABIInfo::AAPCS) {
5069 Alignment = Alignment < 128 ? 64 : 128;
5073 Size = llvm::alignTo(Size, 64);
5077 if (Alignment < 128 && Size == 128) {
5098 RetTy = EnumTy->getDecl()->getIntegerType();
5109 const Type *Base =
nullptr;
5110 uint64_t Members = 0;
5119 if (
getTarget().isRenderScriptTarget()) {
5123 Size = llvm::alignTo(Size, 64);
5127 if (Alignment < 128 && Size == 128) {
5138 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
5141 unsigned NumElements = VT->getNumElements();
5144 if (!llvm::isPowerOf2_32(NumElements))
5146 return Size != 64 && (Size != 128 || NumElements == 1);
5151 bool AArch64ABIInfo::isLegalVectorTypeForSwift(
CharUnits totalSize,
5153 unsigned elts)
const {
5154 if (!llvm::isPowerOf2_32(elts))
5162 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5168 if (BT->isFloatingPoint())
5172 if (VecSize == 64 || VecSize == 128)
5178 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5179 uint64_t Members)
const {
5180 return Members <= 4;
5191 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5195 unsigned NumRegs = 1;
5196 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5197 BaseTy = ArrTy->getElementType();
5198 NumRegs = ArrTy->getNumElements();
5200 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5225 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
5234 RegSize = llvm::alignTo(RegSize, 8);
5243 RegSize = 16 * NumRegs;
5255 UsingStack = CGF.
Builder.CreateICmpSGE(
5256 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
5258 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5267 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
5270 reg_offs = CGF.
Builder.CreateAdd(
5271 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
5273 reg_offs = CGF.
Builder.CreateAnd(
5274 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
5283 NewOffset = CGF.
Builder.CreateAdd(
5284 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
5290 InRegs = CGF.
Builder.CreateICmpSLE(
5291 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
5293 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5305 reg_top_offset,
"reg_top_p");
5307 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
5315 MemTy = llvm::PointerType::getUnqual(MemTy);
5318 const Type *Base =
nullptr;
5319 uint64_t NumMembers = 0;
5321 if (IsHFA && NumMembers > 1) {
5326 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
5329 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5331 std::max(TyAlign, BaseTyInfo.second));
5336 BaseTyInfo.first.getQuantity() < 16)
5337 Offset = 16 - BaseTyInfo.first.getQuantity();
5339 for (
unsigned i = 0; i < NumMembers; ++i) {
5357 CharUnits SlotSize = BaseAddr.getAlignment();
5360 TyInfo.first < SlotSize) {
5384 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
5386 OnStackPtr = CGF.
Builder.CreateAdd(
5387 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
5389 OnStackPtr = CGF.
Builder.CreateAnd(
5390 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
5395 Address OnStackAddr(OnStackPtr,
5402 StackSize = StackSlotSize;
5404 StackSize = TyInfo.first.
alignTo(StackSlotSize);
5408 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
5414 TyInfo.first < StackSlotSize) {
5429 OnStackAddr, OnStackBlock,
"vaargs.addr");
5461 bool IsIndirect =
false;
5462 if (TyInfo.first.getQuantity() > 16) {
5463 const Type *Base =
nullptr;
5464 uint64_t Members = 0;
5469 TyInfo, SlotSize,
true);
5504 bool isEABI()
const {
5505 switch (
getTarget().getTriple().getEnvironment()) {
5506 case llvm::Triple::Android:
5507 case llvm::Triple::EABI:
5508 case llvm::Triple::EABIHF:
5509 case llvm::Triple::GNUEABI:
5510 case llvm::Triple::GNUEABIHF:
5511 case llvm::Triple::MuslEABI:
5512 case llvm::Triple::MuslEABIHF:
5519 bool isEABIHF()
const {
5520 switch (
getTarget().getTriple().getEnvironment()) {
5521 case llvm::Triple::EABIHF:
5522 case llvm::Triple::GNUEABIHF:
5523 case llvm::Triple::MuslEABIHF:
5530 ABIKind getABIKind()
const {
return Kind; }
5535 bool isIllegalVectorType(
QualType Ty)
const;
5539 uint64_t Members)
const override;
5551 bool asReturnValue)
const override {
5554 bool isSwiftErrorInRegister()
const override {
5558 unsigned elts)
const override;
5566 const ARMABIInfo &getABIInfo()
const {
5574 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5575 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5587 unsigned getSizeOfUnwindException()
const override {
5588 if (getABIInfo().isEABI())
return 88;
5592 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5594 if (GV->isDeclaration())
5596 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5600 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
5605 switch (Attr->getInterrupt()) {
5606 case ARMInterruptAttr::Generic: Kind =
"";
break;
5607 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5608 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5609 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5610 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5611 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5614 llvm::Function *Fn = cast<llvm::Function>(GV);
5616 Fn->addFnAttr(
"interrupt", Kind);
5618 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5619 if (ABI == ARMABIInfo::APCS)
5625 llvm::AttrBuilder B;
5626 B.addStackAlignmentAttr(8);
5627 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5631 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5634 : ARMTargetCodeGenInfo(CGT, K) {}
5636 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5639 void getDependentLibraryOption(llvm::StringRef Lib,
5641 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5644 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5646 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5650 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5652 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5653 if (GV->isDeclaration())
5655 addStackProbeTargetAttributes(D, GV, CGM);
5679 if (isEABIHF() ||
getTarget().getTriple().isWatchABI())
5680 return llvm::CallingConv::ARM_AAPCS_VFP;
5682 return llvm::CallingConv::ARM_AAPCS;
5684 return llvm::CallingConv::ARM_APCS;
5690 switch (getABIKind()) {
5691 case APCS:
return llvm::CallingConv::ARM_APCS;
5692 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5693 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5694 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5696 llvm_unreachable(
"bad ABI kind");
5699 void ARMABIInfo::setCCs() {
5705 if (abiCC != getLLVMDefaultCC())
5710 bool isVariadic)
const {
5718 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5723 if (isIllegalVectorType(Ty)) {
5748 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5757 Ty = EnumTy->getDecl()->getIntegerType();
5772 if (IsEffectivelyAAPCS_VFP) {
5775 const Type *Base =
nullptr;
5776 uint64_t Members = 0;
5778 assert(Base &&
"Base class should be set for homogeneous aggregate");
5782 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5786 const Type *Base =
nullptr;
5787 uint64_t Members = 0;
5789 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5796 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5809 uint64_t ABIAlign = 4;
5811 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5812 getABIKind() == ARMABIInfo::AAPCS) {
5819 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5822 TyAlign > ABIAlign);
5827 if (
getTarget().isRenderScriptTarget()) {
5848 llvm::LLVMContext &VMContext) {
5880 if (!RT)
return false;
5891 bool HadField =
false;
5894 i != e; ++i, ++idx) {
5933 bool isVariadic)
const {
5934 bool IsEffectivelyAAPCS_VFP =
5935 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5950 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5959 RetTy = EnumTy->getDecl()->getIntegerType();
5966 if (getABIKind() == APCS) {
5999 if (IsEffectivelyAAPCS_VFP) {
6000 const Type *Base =
nullptr;
6001 uint64_t Members = 0;
6003 assert(Base &&
"Base class should be set for homogeneous aggregate");
6015 if (
getTarget().isRenderScriptTarget()) {
6028 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6031 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6039 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
6047 unsigned NumElements = VT->getNumElements();
6049 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6053 unsigned NumElements = VT->getNumElements();
6056 if (!llvm::isPowerOf2_32(NumElements))
6065 bool ARMABIInfo::isLegalVectorTypeForSwift(
CharUnits vectorSize,
6067 unsigned numElts)
const {
6068 if (!llvm::isPowerOf2_32(numElts))
6070 unsigned size =
getDataLayout().getTypeStoreSizeInBits(eltTy);
6079 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
6083 if (BT->getKind() == BuiltinType::Float ||
6084 BT->getKind() == BuiltinType::Double ||
6085 BT->getKind() == BuiltinType::LongDouble)
6089 if (VecSize == 64 || VecSize == 128)
6095 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
6096 uint64_t Members)
const {
6097 return Members <= 4;
6112 CharUnits TyAlignForABI = TyInfo.second;
6115 bool IsIndirect =
false;
6116 const Type *Base =
nullptr;
6117 uint64_t Members = 0;
6124 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6132 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6133 getABIKind() == ARMABIInfo::AAPCS) {
6136 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6143 TyInfo.second = TyAlignForABI;
6155 class NVPTXABIInfo :
public ABIInfo {
6172 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6174 bool shouldEmitStaticExternCAliases()
const override;
6179 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
6192 RetTy = EnumTy->getDecl()->getIntegerType();
6201 Ty = EnumTy->getDecl()->getIntegerType();
6226 llvm_unreachable(
"NVPTX does not support varargs");
6229 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6231 if (GV->isDeclaration())
6233 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6236 llvm::Function *F = cast<llvm::Function>(GV);
6242 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6245 addNVVMMetadata(F,
"kernel", 1);
6247 F->addFnAttr(llvm::Attribute::NoInline);
6256 if (FD->
hasAttr<CUDAGlobalAttr>()) {
6258 addNVVMMetadata(F,
"kernel", 1);
6260 if (CUDALaunchBoundsAttr *
Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
6262 llvm::APSInt MaxThreads(32);
6263 MaxThreads =
Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
6265 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
6270 if (
Attr->getMinBlocks()) {
6271 llvm::APSInt MinBlocks(32);
6272 MinBlocks =
Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
6275 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
6281 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6283 llvm::Module *M = F->getParent();
6287 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
6289 llvm::Metadata *MDVals[] = {
6290 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6291 llvm::ConstantAsMetadata::get(
6292 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6294 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6297 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
6315 bool isPromotableIntegerType(
QualType Ty)
const;
6316 bool isCompoundType(
QualType Ty)
const;
6317 bool isVectorArgumentType(
QualType Ty)
const;
6318 bool isFPArgumentType(
QualType Ty)
const;
6328 I.info = classifyArgumentType(I.type);
6335 bool asReturnValue)
const override {
6338 bool isSwiftErrorInRegister()
const override {
6351 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
6354 Ty = EnumTy->getDecl()->getIntegerType();
6362 switch (BT->getKind()) {
6363 case BuiltinType::Int:
6364 case BuiltinType::UInt:
6372 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
6378 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
6379 return (HasVector &&
6384 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
6386 switch (BT->getKind()) {
6387 case BuiltinType::Float:
6388 case BuiltinType::Double:
6403 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6404 for (
const auto &I : CXXRD->bases()) {
6413 Found = GetSingleElementType(Base);
6417 for (
const auto *FD : RD->
fields()) {
6429 Found = GetSingleElementType(FD->getType());
6460 bool InFPRs =
false;
6461 bool IsVector =
false;
6465 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6470 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6471 IsVector = ArgTy->isVectorTy();
6472 UnpaddedSize = TyInfo.first;
6473 DirectAlign = TyInfo.second;
6476 if (IsVector && UnpaddedSize > PaddedSize)
6478 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
6480 CharUnits Padding = (PaddedSize - UnpaddedSize);
6484 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
6492 "overflow_arg_area_ptr");
6502 "overflow_arg_area");
6510 unsigned MaxRegs, RegCountField, RegSaveIndex;
6521 RegPadding = Padding;
6528 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6535 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6542 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
6544 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
6547 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
6550 "reg_save_area_ptr");
6560 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6562 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6583 "overflow_arg_area");
6590 MemAddr, InMemBlock,
"va_arg.addr");
6602 if (isVectorArgumentType(RetTy))
6616 if (isPromotableIntegerType(Ty))
6623 QualType SingleElementTy = GetSingleElementType(Ty);
6624 if (isVectorArgumentType(SingleElementTy) &&
6625 getContext().getTypeSize(SingleElementTy) == Size)
6629 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6642 if (isFPArgumentType(SingleElementTy)) {
6643 assert(Size == 32 || Size == 64);
6654 if (isCompoundType(Ty))
6670 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6676 void MSP430TargetCodeGenInfo::setTargetAttributes(
6678 if (GV->isDeclaration())
6680 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6681 if (
const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6683 llvm::Function *F = cast<llvm::Function>(GV);
6686 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6689 F->addFnAttr(llvm::Attribute::NoInline);
6692 unsigned Num = attr->getNumber() / 2;
6694 "__isr_" + Twine(Num), F);
6705 class MipsABIInfo :
public ABIInfo {
6707 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6708 void CoerceToIntArgs(uint64_t TySize,
6715 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6716 StackAlignInBytes(IsO32 ? 8 : 16) {}
6727 unsigned SizeOfUnwindException;
6731 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6737 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6739 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6741 llvm::Function *Fn = cast<llvm::Function>(GV);
6743 if (FD->
hasAttr<MipsLongCallAttr>())
6744 Fn->addFnAttr(
"long-call");
6745 else if (FD->
hasAttr<MipsShortCallAttr>())
6746 Fn->addFnAttr(
"short-call");
6749 if (GV->isDeclaration())
6752 if (FD->
hasAttr<Mips16Attr>()) {
6753 Fn->addFnAttr(
"mips16");
6755 else if (FD->
hasAttr<NoMips16Attr>()) {
6756 Fn->addFnAttr(
"nomips16");
6759 if (FD->
hasAttr<MicroMipsAttr>())
6760 Fn->addFnAttr(
"micromips");
6761 else if (FD->
hasAttr<NoMicroMipsAttr>())
6762 Fn->addFnAttr(
"nomicromips");
6764 const MipsInterruptAttr *
Attr = FD->
getAttr<MipsInterruptAttr>();
6769 switch (Attr->getInterrupt()) {
6770 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6771 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6772 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6773 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6774 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6775 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6776 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6777 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6778 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6781 Fn->addFnAttr(
"interrupt", Kind);
6788 unsigned getSizeOfUnwindException()
const override {
6789 return SizeOfUnwindException;
6794 void MipsABIInfo::CoerceToIntArgs(
6796 llvm::IntegerType *IntTy =
6797 llvm::IntegerType::get(
getVMContext(), MinABIStackAlignInBytes * 8);
6800 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6801 ArgList.push_back(IntTy);
6804 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6807 ArgList.push_back(llvm::IntegerType::get(
getVMContext(), R));
6816 CoerceToIntArgs(TySize, ArgList);
6827 CoerceToIntArgs(TySize, ArgList);
6833 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6835 uint64_t LastOffset = 0;
6837 llvm::IntegerType *I64 = llvm::IntegerType::get(
getVMContext(), 64);
6842 i != e; ++i, ++idx) {
6846 if (!BT || BT->
getKind() != BuiltinType::Double)
6854 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6855 ArgList.push_back(I64);
6858 ArgList.push_back(llvm::Type::getDoubleTy(
getVMContext()));
6859 LastOffset = Offset + 64;
6862 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6863 ArgList.append(IntArgList.begin(), IntArgList.end());
6868 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6870 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6873 return llvm::IntegerType::get(
getVMContext(), (Offset - OrigOffset) * 8);
6880 uint64_t OrigOffset =
Offset;
6885 (uint64_t)StackAlignInBytes);
6886 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6887 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6895 Offset = OrigOffset + MinABIStackAlignInBytes;
6904 getPaddingType(OrigOffset, CurrOffset));
6911 Ty = EnumTy->getDecl()->getIntegerType();
6915 return extendType(Ty);
6918 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
6922 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
6942 for (; b != e; ++b) {
6959 CoerceToIntArgs(Size, RTList);
6971 if (!IsO32 && Size == 0)
6995 RetTy = EnumTy->getDecl()->getIntegerType();
7025 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7027 bool DidPromote =
false;
7047 TyInfo, ArgSlotSize,
true);
7118 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7120 if (GV->isDeclaration())
7122 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7124 auto *Fn = cast<llvm::Function>(GV);
7126 if (FD->getAttr<AVRInterruptAttr>())
7127 Fn->addFnAttr(
"interrupt");
7129 if (FD->getAttr<AVRSignalAttr>())
7130 Fn->addFnAttr(
"signal");
7143 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
7146 : DefaultTargetCodeGenInfo(CGT) {}
7148 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7152 void TCETargetCodeGenInfo::setTargetAttributes(
7154 if (GV->isDeclaration())
7156 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7159 llvm::Function *F = cast<llvm::Function>(GV);
7162 if (FD->
hasAttr<OpenCLKernelAttr>()) {
7164 F->addFnAttr(llvm::Attribute::NoInline);
7165 const ReqdWorkGroupSizeAttr *
Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
7168 llvm::LLVMContext &Context = F->getContext();
7169 llvm::NamedMDNode *OpenCLMetadata =
7171 "opencl.kernel_wg_size_info");
7174 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7177 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7178 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7180 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7181 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7183 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7184 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7190 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7191 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7205 class HexagonABIInfo :
public ABIInfo {
7245 Ty = EnumTy->getDecl()->getIntegerType();
7283 RetTy = EnumTy->getDecl()->getIntegerType();
7323 class LanaiABIInfo :
public DefaultABIInfo {
7327 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
7350 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
7352 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7354 if (SizeInRegs == 0)
7357 if (SizeInRegs > State.FreeRegs) {
7362 State.FreeRegs -= SizeInRegs;
7368 CCState &State)
const {
7370 if (State.FreeRegs) {
7378 const unsigned MinABIStackAlignInBytes = 4;
7382 MinABIStackAlignInBytes);
7386 CCState &State)
const {
7392 return getIndirectResult(Ty,
false, State);
7401 return getIndirectResult(Ty,
true, State);
7409 if (SizeInRegs <= State.FreeRegs) {
7410 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7412 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7413 State.FreeRegs -= SizeInRegs;
7418 return getIndirectResult(Ty,
true, State);
7423 Ty = EnumTy->getDecl()->getIntegerType();
7425 bool InReg = shouldUseInReg(Ty, State);
7450 class AMDGPUABIInfo final :
public DefaultABIInfo {
7452 static const unsigned MaxNumRegsForArgsRet = 16;
7454 unsigned numRegsForType(
QualType Ty)
const;
7458 uint64_t Members)
const override;
7462 DefaultABIInfo(CGT) {}
7471 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
7475 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7476 const Type *Base, uint64_t Members)
const {
7480 return Members * NumRegs <= MaxNumRegsForArgsRet;
7484 unsigned AMDGPUABIInfo::numRegsForType(
QualType Ty)
const {
7485 unsigned NumRegs = 0;
7490 QualType EltTy = VT->getElementType();
7495 return (VT->getNumElements() + 1) / 2;
7497 unsigned EltNumRegs = (EltSize + 31) / 32;
7498 return EltNumRegs * VT->getNumElements();
7506 QualType FieldTy = Field->getType();
7507 NumRegs += numRegsForType(FieldTy);
7513 return (
getContext().getTypeSize(Ty) + 31) / 32;
7522 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7524 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7525 Arg.info = classifyKernelArgumentType(Arg.type);
7564 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7591 unsigned &NumRegsLeft)
const {
7592 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
7621 unsigned NumRegs = (Size + 31) / 32;
7622 NumRegsLeft -=
std::min(NumRegsLeft, NumRegs);
7635 if (NumRegsLeft > 0) {
7636 unsigned NumRegs = numRegsForType(Ty);
7637 if (NumRegsLeft >= NumRegs) {
7638 NumRegsLeft -= NumRegs;
7647 unsigned NumRegs = numRegsForType(Ty);
7648 NumRegsLeft -=
std::min(NumRegs, NumRegsLeft);
7658 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7660 unsigned getOpenCLKernelCallingConv()
const override;
7663 llvm::PointerType *T,
QualType QT)
const override;
7665 LangAS getASTAllocaAddressSpace()
const override {
7670 const VarDecl *D)
const override;
7672 llvm::LLVMContext &C)
const override;
7675 llvm::Function *BlockInvokeFunc,
7677 bool shouldEmitStaticExternCAliases()
const override;
7682 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7684 if (GV->isDeclaration())
7686 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7690 llvm::Function *F = cast<llvm::Function>(GV);
7693 FD->
getAttr<ReqdWorkGroupSizeAttr>() :
nullptr;
7696 (M.
getTriple().getOS() == llvm::Triple::AMDHSA))
7697 F->addFnAttr(
"amdgpu-implicitarg-num-bytes",
"48");
7699 const auto *FlatWGS = FD->
getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7700 if (ReqdWGS || FlatWGS) {
7701 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7702 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7703 if (ReqdWGS && Min == 0 && Max == 0)
7704 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7707 assert(Min <= Max &&
"Min must be less than or equal Max");
7709 std::string AttrVal = llvm::utostr(Min) +
"," + llvm::utostr(Max);
7710 F->addFnAttr(
"amdgpu-flat-work-group-size", AttrVal);
7712 assert(Max == 0 &&
"Max must be zero");
7715 if (
const auto *
Attr = FD->
getAttr<AMDGPUWavesPerEUAttr>()) {
7716 unsigned Min =
Attr->getMin();
7717 unsigned Max =
Attr->getMax();
7720 assert((Max == 0 || Min <= Max) &&
"Min must be less than or equal Max");
7722 std::string AttrVal = llvm::utostr(Min);
7724 AttrVal = AttrVal +
"," + llvm::utostr(Max);
7725 F->addFnAttr(
"amdgpu-waves-per-eu", AttrVal);
7727 assert(Max == 0 &&
"Max must be zero");
7730 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
7731 unsigned NumSGPR =
Attr->getNumSGPR();
7734 F->addFnAttr(
"amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7737 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
7738 uint32_t NumVGPR =
Attr->getNumVGPR();
7741 F->addFnAttr(
"amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7745 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7746 return llvm::CallingConv::AMDGPU_KERNEL;
7754 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7758 return llvm::ConstantPointerNull::get(PT);
7761 auto NPT = llvm::PointerType::get(PT->getElementType(),
7763 return llvm::ConstantExpr::getAddrSpaceCast(
7764 llvm::ConstantPointerNull::get(NPT), PT);
7768 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
7772 "Address space agnostic languages only");
7776 return DefaultGlobalAS;
7785 return ConstAS.getValue();
7787 return DefaultGlobalAS;
7791 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
SyncScope S,
7792 llvm::LLVMContext &C)
const {
7807 return C.getOrInsertSyncScopeID(Name);
7810 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
7816 FT = getABIInfo().getContext().adjustFunctionType(
7827 class SparcV8ABIInfo :
public DefaultABIInfo {
7890 class SparcV9ABIInfo :
public ABIInfo {
7911 struct CoerceBuilder {
7912 llvm::LLVMContext &Context;
7913 const llvm::DataLayout &DL;
7918 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
7919 : Context(c), DL(dl), Size(0), InReg(
false) {}
7922 void pad(uint64_t ToSize) {
7923 assert(ToSize >= Size &&
"Cannot remove elements");
7928 uint64_t Aligned = llvm::alignTo(Size, 64);
7929 if (Aligned > Size && Aligned <= ToSize) {
7930 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
7935 while (Size + 64 <= ToSize) {
7936 Elems.push_back(llvm::Type::getInt64Ty(Context));
7941 if (Size < ToSize) {
7942 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
7956 Elems.push_back(Ty);
7957 Size = Offset + Bits;
7961 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7962 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7963 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7964 llvm::Type *ElemTy = StrTy->getElementType(i);
7965 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7966 switch (ElemTy->getTypeID()) {
7967 case llvm::Type::StructTyID:
7968 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
7970 case llvm::Type::FloatTyID:
7971 addFloat(ElemOffset, ElemTy, 32);
7973 case llvm::Type::DoubleTyID:
7974 addFloat(ElemOffset, ElemTy, 64);
7976 case llvm::Type::FP128TyID:
7977 addFloat(ElemOffset, ElemTy, 128);
7979 case llvm::Type::PointerTyID:
7980 if (ElemOffset % 64 == 0) {
7982 Elems.push_back(ElemTy);
7993 bool isUsableType(llvm::StructType *Ty)
const {
7994 return llvm::makeArrayRef(Elems) == Ty->elements();
7999 if (Elems.size() == 1)
8000 return Elems.front();
8002 return llvm::StructType::get(Context, Elems);
8017 if (Size > SizeLimit)
8022 Ty = EnumTy->getDecl()->getIntegerType();
8025 if (Size < 64 && Ty->isIntegerType())
8039 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(
CGT.
ConvertType(Ty));
8044 CB.addStruct(0, StrTy);
8045 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8048 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8067 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8077 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8109 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
8141 llvm::IntegerType *i8 = CGF.
Int8Ty;
8142 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8143 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8232 class TypeStringCache {
8233 enum Status {NonRecursive, Recursive,
Incomplete, IncompleteUsed};
8237 std::string Swapped;
8240 std::map<const IdentifierInfo *, struct Entry> Map;
8241 unsigned IncompleteCount;
8242 unsigned IncompleteUsedCount;
8244 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8254 class FieldEncoding {
8258 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8259 StringRef str() {
return Enc; }
8260 bool operator<(
const FieldEncoding &rhs)
const {
8261 if (HasName != rhs.HasName)
return HasName;
8262 return Enc < rhs.Enc;
8266 class XCoreABIInfo :
public DefaultABIInfo {
8274 mutable TypeStringCache TSC;
8278 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8298 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8299 AI.setCoerceToType(ArgTy);
8300 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8304 switch (AI.getKind()) {
8308 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8310 Val =
Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8318 ArgSize = ArgSize.
alignTo(SlotSize);
8343 std::string StubEnc) {
8347 assert( (E.Str.empty() || E.State == Recursive) &&
8348 "Incorrectly use of addIncomplete");
8349 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
8350 E.Swapped.swap(E.Str);
8351 E.Str.swap(StubEnc);
8360 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
8363 auto I = Map.find(ID);
8364 assert(I != Map.end() &&
"Entry not present");
8365 Entry &E = I->second;
8367 E.State == IncompleteUsed) &&
8368 "Entry must be an incomplete type");
8369 bool IsRecursive =
false;
8370 if (E.State == IncompleteUsed) {
8373 --IncompleteUsedCount;
8375 if (E.Swapped.empty())
8379 E.Swapped.swap(E.Str);
8381 E.State = Recursive;
8389 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
8391 if (!ID || IncompleteUsedCount)
8394 if (IsRecursive && !E.Str.empty()) {
8395 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8396 "This is not the same Recursive entry");
8402 assert(E.Str.empty() &&
"Entry already present");
8404 E.State = IsRecursive? Recursive : NonRecursive;
8413 auto I = Map.find(ID);
8416 Entry &E = I->second;
8417 if (E.State == Recursive && IncompleteCount)
8422 E.State = IncompleteUsed;
8423 ++IncompleteUsedCount;
8444 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8448 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
8449 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8450 llvm::MDString::get(Ctx, Enc.str())};
8451 llvm::NamedMDNode *MD =
8452 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
8453 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8466 unsigned getOpenCLKernelCallingConv()
const override;
8474 DefaultABIInfo SPIRABI(CGM.
getTypes());
8475 SPIRABI.computeInfo(FI);
8480 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
8481 return llvm::CallingConv::SPIR_KERNEL;
8486 TypeStringCache &TSC);
8494 TypeStringCache &TSC) {
8495 for (
const auto *Field : RD->
fields()) {
8498 Enc += Field->getName();
8500 if (Field->isBitField()) {
8502 llvm::raw_svector_ostream OS(Enc);
8503 OS << Field->getBitWidthValue(CGM.
getContext());
8506 if (!
appendType(Enc, Field->getType(), CGM, TSC))
8508 if (Field->isBitField())
8511 FE.emplace_back(!Field->getName().empty(), Enc);
8523 StringRef TypeString = TSC.lookupStr(ID);
8524 if (!TypeString.empty()) {
8530 size_t Start = Enc.size();
8538 bool IsRecursive =
false;
8545 std::string StubEnc(Enc.substr(Start).str());
8547 TSC.addIncomplete(ID, std::move(StubEnc));
8549 (void) TSC.removeIncomplete(ID);
8552 IsRecursive = TSC.removeIncomplete(ID);
8556 llvm::sort(FE.begin(), FE.end());
8558 unsigned E = FE.size();
8559 for (
unsigned I = 0; I != E; ++I) {
8566 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8572 TypeStringCache &TSC,
8575 StringRef TypeString = TSC.lookupStr(ID);
8576 if (!TypeString.empty()) {
8581 size_t Start = Enc.size();
8590 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8592 SmallStringEnc EnumEnc;
8594 EnumEnc += I->getName();
8596 I->getInitVal().toString(EnumEnc);
8598 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8600 llvm::sort(FE.begin(), FE.end());
8601 unsigned E = FE.size();
8602 for (
unsigned I = 0; I != E; ++I) {
8609 TSC.addIfComplete(ID, Enc.substr(Start),
false);
8617 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
8625 Enc += Table[Lookup];
8630 const char *EncType;
8632 case BuiltinType::Void:
8635 case BuiltinType::Bool:
8638 case BuiltinType::Char_U:
8641 case BuiltinType::UChar:
8644 case BuiltinType::SChar:
8647 case BuiltinType::UShort:
8650 case BuiltinType::Short:
8653 case BuiltinType::UInt:
8656 case BuiltinType::Int:
8659 case BuiltinType::ULong:
8662 case BuiltinType::Long:
8665 case BuiltinType::ULongLong:
8668 case BuiltinType::LongLong:
8671 case BuiltinType::Float:
8674 case BuiltinType::Double:
8677 case BuiltinType::LongDouble:
8690 TypeStringCache &TSC) {
8702 TypeStringCache &TSC, StringRef NoSizeEnc) {
8707 CAT->getSize().toStringUnsigned(Enc);
8723 TypeStringCache &TSC) {
8730 auto I = FPT->param_type_begin();
8731 auto E = FPT->param_type_end();
8740 if (FPT->isVariadic())
8743 if (FPT->isVariadic())
8757 TypeStringCache &TSC) {
8794 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
8797 return appendType(Enc, FD->getType(), CGM, TSC);
8800 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
8803 QualType QT = VD->getType().getCanonicalType();
8820 class RISCVABIInfo :
public DefaultABIInfo {
8823 static const int NumArgGPRs = 8;
8827 : DefaultABIInfo(CGT), XLen(XLen) {}
8834 int &ArgGPRsLeft)
const;
8861 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
8866 bool IsFixed = ArgNum < NumFixedArgs;
8873 int &ArgGPRsLeft)
const {
8874 assert(ArgGPRsLeft <= NumArgGPRs &&
"Arg GPR tracking underflow");
8892 bool MustUseStack =
false;
8896 int NeededArgGPRs = 1;
8897 if (!IsFixed && NeededAlign == 2 * XLen)
8898 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
8899 else if (Size > XLen && Size <= 2 * XLen)
8902 if (NeededArgGPRs > ArgGPRsLeft) {
8903 MustUseStack =
true;
8904 NeededArgGPRs = ArgGPRsLeft;
8907 ArgGPRsLeft -= NeededArgGPRs;
8912 Ty = EnumTy->getDecl()->getIntegerType();
8916 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
8917 return extendType(Ty);
8925 if (Size <= 2 * XLen) {
8933 }
else if (Alignment == 2 * XLen) {
8948 int ArgGPRsLeft = 2;
8966 std::pair<CharUnits, CharUnits> SizeAndAlign =
8970 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
8990 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
8992 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
8995 const auto *
Attr = FD->getAttr<RISCVInterruptAttr>();
9000 switch (
Attr->getInterrupt()) {
9001 case RISCVInterruptAttr::user: Kind =
"user";
break;
9002 case RISCVInterruptAttr::supervisor: Kind =
"supervisor";
break;
9003 case RISCVInterruptAttr::machine: Kind =
"machine";
break;
9006 auto *Fn = cast<llvm::Function>(GV);
9008 Fn->addFnAttr(
"interrupt", Kind);
9018 return getTriple().supportsCOMDAT();
9022 if (TheTargetCodeGenInfo)
9023 return *TheTargetCodeGenInfo;
9027 this->TheTargetCodeGenInfo.reset(
P);
9032 switch (Triple.getArch()) {
9034 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
9036 case llvm::Triple::le32:
9037 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9038 case llvm::Triple::mips:
9039 case llvm::Triple::mipsel:
9040 if (Triple.getOS() == llvm::Triple::NaCl)
9041 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9042 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
9044 case llvm::Triple::mips64:
9045 case llvm::Triple::mips64el:
9046 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
9048 case llvm::Triple::avr:
9049 return SetCGInfo(
new AVRTargetCodeGenInfo(Types));
9051 case llvm::Triple::aarch64:
9052 case llvm::Triple::aarch64_be: {
9053 AArch64ABIInfo::ABIKind
Kind = AArch64ABIInfo::AAPCS;
9054 if (
getTarget().getABI() ==
"darwinpcs")
9055 Kind = AArch64ABIInfo::DarwinPCS;
9056 else if (Triple.isOSWindows())
9058 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
9060 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
9063 case llvm::Triple::wasm32:
9064 case llvm::Triple::wasm64:
9065 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
9067 case llvm::Triple::arm:
9068 case llvm::Triple::armeb:
9069 case llvm::Triple::thumb:
9070 case llvm::Triple::thumbeb: {
9071 if (Triple.getOS() == llvm::Triple::Win32) {
9073 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
9076 ARMABIInfo::ABIKind
Kind = ARMABIInfo::AAPCS;
9078 if (ABIStr ==
"apcs-gnu")
9079 Kind = ARMABIInfo::APCS;
9080 else if (ABIStr ==
"aapcs16")
9081 Kind = ARMABIInfo::AAPCS16_VFP;
9082 else if (CodeGenOpts.FloatABI ==
"hard" ||
9083 (CodeGenOpts.FloatABI !=
"soft" &&
9084 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
9085 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
9086 Triple.getEnvironment() == llvm::Triple::EABIHF)))
9087 Kind = ARMABIInfo::AAPCS_VFP;
9089 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
9092 case llvm::Triple::ppc:
9094 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI ==
"soft"));
9095 case llvm::Triple::ppc64:
9096 if (Triple.isOSBinFormatELF()) {
9097 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv1;
9099 Kind = PPC64_SVR4_ABIInfo::ELFv2;
9101 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9103 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9106 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
9107 case llvm::Triple::ppc64le: {
9108 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
9109 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv2;
9111 Kind = PPC64_SVR4_ABIInfo::ELFv1;
9113 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9115 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9119 case llvm::Triple::nvptx:
9120 case llvm::Triple::nvptx64:
9121 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
9123 case llvm::Triple::msp430:
9124 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
9126 case llvm::Triple::riscv32:
9127 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, 32));
9128 case llvm::Triple::riscv64:
9129 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, 64));
9131 case llvm::Triple::systemz: {
9133 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
9136 case llvm::Triple::tce:
9137 case llvm::Triple::tcele:
9138 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
9140 case llvm::Triple::x86: {
9141 bool IsDarwinVectorABI = Triple.isOSDarwin();
9142 bool RetSmallStructInRegABI =
9143 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
9144 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
9146 if (Triple.getOS() == llvm::Triple::Win32) {
9147 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
9148 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9149 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
9151 return SetCGInfo(
new X86_32TargetCodeGenInfo(
9152 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9153 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
9154 CodeGenOpts.FloatABI ==
"soft"));
9158 case llvm::Triple::x86_64: {
9162 ? X86AVXABILevel::AVX512
9165 switch (Triple.getOS()) {
9166 case llvm::Triple::Win32:
9167 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
9168 case llvm::Triple::PS4:
9169 return SetCGInfo(
new PS4TargetCodeGenInfo(Types, AVXLevel));
9171 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
9174 case llvm::Triple::hexagon:
9175 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
9176 case llvm::Triple::lanai:
9177 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
9178 case llvm::Triple::r600:
9179 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9180 case llvm::Triple::amdgcn:
9181 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9182 case llvm::Triple::sparc:
9183 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
9184 case llvm::Triple::sparcv9:
9185 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
9186 case llvm::Triple::xcore:
9187 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
9188 case llvm::Triple::spir:
9189 case llvm::Triple::spir64:
9190 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
9201 llvm::Function *Invoke,
9203 auto *InvokeFT = Invoke->getFunctionType();
9205 for (
auto &
P : InvokeFT->params())
9206 ArgTys.push_back(
P);
9208 std::string Name = Invoke->getName().str() +
"_kernel";
9209 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9212 auto IP = CGF.
Builder.saveIP();
9215 Builder.SetInsertPoint(BB);
9217 for (
auto &A : F->args())
9219 Builder.CreateCall(Invoke, Args);
9220 Builder.CreateRetVoid();
9221 Builder.restoreIP(IP);
9233 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9239 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9240 auto *InvokeFT = Invoke->getFunctionType();
9249 ArgTys.push_back(BlockTy);
9250 ArgTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9251 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9252 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9253 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9254 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9255 ArgNames.push_back(llvm::MDString::get(C,
"block_literal"));
9256 for (
unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9257 ArgTys.push_back(InvokeFT->getParamType(I));
9258 ArgTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9259 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9260 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9261 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9262 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9264 llvm::MDString::get(C, (Twine(
"local_arg") + Twine(I)).str()));
9266 std::string Name = Invoke->getName().str() +
"_kernel";
9267 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9270 F->addFnAttr(
"enqueued-block");
9271 auto IP = CGF.
Builder.saveIP();
9273 Builder.SetInsertPoint(BB);
9274 unsigned BlockAlign = CGF.
CGM.
getDataLayout().getPrefTypeAlignment(BlockTy);
9275 auto *BlockPtr = Builder.CreateAlloca(BlockTy,
nullptr);
9276 BlockPtr->setAlignment(BlockAlign);
9277 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9278 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9280 Args.push_back(Cast);
9281 for (
auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9283 Builder.CreateCall(Invoke, Args);
9284 Builder.CreateRetVoid();
9285 Builder.restoreIP(IP);
9287 F->setMetadata(
"kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9288 F->setMetadata(
"kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9289 F->setMetadata(
"kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9290 F->setMetadata(
"kernel_arg_base_type",
9291 llvm::MDNode::get(C, ArgBaseTypeNames));
9292 F->setMetadata(
"kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9294 F->setMetadata(
"kernel_arg_name", llvm::MDNode::get(C, ArgNames));
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is a multiple of Align. Align must be non-zero.
Represents a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
External linkage, which indicates that the entity can be referred to from other translation units...
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration type whose underlying type is an unsigned integer type.
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space if necessary.
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another specified type stored in 'CoerceToType'.
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e. it has exactly one non-empty field or exactly one field which is itself a single element struct.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
ASTContext & getContext() const
Represents a variable declaration or definition.
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Address getAddress() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicates default alignment).
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
Represents a member of a struct/union/class.
bool isReferenceType() const
CharUnits getTypeUnadjustedAlignInChars(QualType T) const
getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, in characters, before alignment adjustments.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
const_arg_iterator arg_begin() const
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isFloat128Type() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
unsigned getTypeUnadjustedAlign(QualType T) const
Return the ABI-specified natural alignment of a (complete) type T, before alignment adjustments...
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
ExtInfo withCallingConv(CallingConv cc) const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
llvm::CallingConv::ID RuntimeCC
static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
const TargetInfo & getTarget() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
SyncScope
Defines synch scope values used internally by clang.
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
bool isMemberFunctionPointerType() const
llvm::LLVMContext & getLLVMContext()
bool canPassInRegisters() const
Determine whether this class can be passed in registers.
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific code generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
StringRef getName() const
Return the actual identifier string.
const TargetInfo & getTarget() const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
bool isFloat16Type() const
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA...
ExtInfo getExtInfo() const
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
virtual bool hasFloat128Type() const
Determine whether the __float128 type is supported on this target.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
ASTContext & getContext() const
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const
Get the syncscope used in LLVM IR.
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
__DEVICE__ int max(int __a, int __b)
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isPointerType() const
unsigned getNumRequiredArgs() const
__DEVICE__ int min(int __a, int __b)
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue references.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
const LangOptions & getLangOpts() const
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
const CodeGenOptions & getCodeGenOpts() const
const llvm::Triple & getTriple() const