25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringSwitch.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Type.h" 31 #include "llvm/Support/raw_ostream.h" 34 using namespace clang;
35 using namespace CodeGen;
53 llvm::LLVMContext &LLVMContext) {
57 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
58 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
68 for (
unsigned I = FirstIndex; I <= LastIndex; ++I) {
70 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
84 ByRef, Realign, Padding);
115 unsigned maxAllRegisters) {
116 unsigned intCount = 0, fpCount = 0;
118 if (
type->isPointerTy()) {
120 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
122 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
124 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
129 return (intCount + fpCount > maxAllRegisters);
134 unsigned numElts)
const {
164 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
165 !RT->getDecl()->canPassInRegisters()) {
178 if (UD->
hasAttr<TransparentUnionAttr>()) {
179 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
217 uint64_t Members)
const {
222 raw_ostream &OS = llvm::errs();
223 OS <<
"(ABIArgInfo Kind=";
226 OS <<
"Direct Type=";
239 OS <<
"InAlloca Offset=" << getInAllocaFieldIndex();
242 OS <<
"Indirect Align=" << getIndirectAlign().getQuantity()
243 <<
" ByVal=" << getIndirectByVal()
244 <<
" Realign=" << getIndirectRealign();
249 case CoerceAndExpand:
250 OS <<
"CoerceAndExpand Type=";
251 getCoerceAndExpandType()->print(OS);
264 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
266 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
268 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
270 Ptr->getName() +
".aligned");
294 bool AllowHigherAlign) {
304 if (AllowHigherAlign && DirectAlign > SlotSize) {
321 !DirectTy->isStructTy()) {
344 std::pair<CharUnits, CharUnits> ValueInfo,
346 bool AllowHigherAlign) {
353 DirectSize = ValueInfo.first;
354 DirectAlign = ValueInfo.second;
360 DirectTy = DirectTy->getPointerTo(0);
363 DirectSize, DirectAlign,
376 Address Addr1, llvm::BasicBlock *Block1,
377 Address Addr2, llvm::BasicBlock *Block2,
378 const llvm::Twine &Name =
"") {
380 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(Addr1.
getType(), 2, Name);
431 return llvm::CallingConv::SPIR_KERNEL;
435 llvm::PointerType *T,
QualType QT)
const {
436 return llvm::ConstantPointerNull::get(T);
443 "Address space agnostic languages only");
452 if (
auto *C = dyn_cast<llvm::Constant>(Src))
453 return performAddrSpaceCast(CGF.
CGM, C, SrcAddr, DestAddr, DestTy);
463 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
468 return C.getOrInsertSyncScopeID(
"");
486 if (AT->getSize() == 0)
488 FT = AT->getElementType();
499 if (isa<CXXRecordDecl>(RT->
getDecl()))
517 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
518 for (
const auto &I : CXXRD->bases())
522 for (
const auto *I : RD->
fields())
545 const Type *Found =
nullptr;
548 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
549 for (
const auto &I : CXXRD->bases()) {
567 for (
const auto *FD : RD->
fields()) {
581 if (AT->getSize().getZExtValue() != 1)
583 FT = AT->getElementType();
619 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
622 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
631 return Address(Addr, TyAlignForABI);
634 "Unexpected ArgInfo Kind in generic VAArg emitter!");
637 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
639 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
641 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
643 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
656 class DefaultABIInfo :
public ABIInfo {
667 I.info = classifyArgumentType(I.type);
696 Ty = EnumTy->getDecl()->getIntegerType();
711 RetTy = EnumTy->getDecl()->getIntegerType();
724 DefaultABIInfo defaultInfo;
741 Arg.info = classifyArgumentType(Arg.type);
748 bool asReturnValue)
const override {
752 bool isSwiftErrorInRegister()
const override {
762 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
765 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
766 if (
const auto *
Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
767 llvm::Function *Fn = cast<llvm::Function>(GV);
769 B.addAttribute(
"wasm-import-module",
Attr->getImportModule());
770 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
772 if (
const auto *
Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
773 llvm::Function *Fn = cast<llvm::Function>(GV);
775 B.addAttribute(
"wasm-import-name",
Attr->getImportName());
776 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
780 if (
auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
781 llvm::Function *Fn = cast<llvm::Function>(GV);
782 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
783 Fn->addFnAttr(
"no-prototype");
808 return defaultInfo.classifyArgumentType(Ty);
828 return defaultInfo.classifyReturnType(RetTy);
846 class PNaClABIInfo :
public ABIInfo {
891 Ty = EnumTy->getDecl()->getIntegerType();
911 RetTy = EnumTy->getDecl()->getIntegerType();
920 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
921 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
922 IRType->getScalarSizeInBits() != 64;
926 StringRef Constraint,
928 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
929 .Cases(
"y",
"&y",
"^Ym",
true)
931 if (IsMMXCons && Ty->isVectorTy()) {
932 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
948 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
949 if (BT->getKind() == BuiltinType::LongDouble) {
951 &llvm::APFloat::x87DoubleExtended())
960 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
/// Returns true if a homogeneous vector aggregate with \p NumMembers elements
/// is small enough to be passed in registers under vectorcall: the limit is
/// four members.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  // vectorcall caps register-passed homogeneous aggregates at four members.
  constexpr uint64_t MaxMembers = 4;
  return NumMembers <= MaxMembers;
}
986 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
990 unsigned FreeSSERegs;
995 VectorcallMaxParamNumAsReg = 6
1005 static const unsigned MinABIStackAlignInBytes = 4;
1007 bool IsDarwinVectorABI;
1008 bool IsRetSmallStructInRegABI;
1009 bool IsWin32StructABI;
1010 bool IsSoftFloatABI;
1012 unsigned DefaultNumRegisterParameters;
/// Returns true if \p Size (in bits) is one of the widths that fits a
/// general-purpose register pattern on x86-32 (8, 16, 32, or 64 bits).
static bool isRegisterSize(unsigned Size) {
  switch (Size) {
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}
1020 return isX86VectorTypeForVectorCall(
getContext(), Ty);
1024 uint64_t NumMembers)
const override {
1026 return isX86VectorCallAggregateSmallEnough(NumMembers);
1038 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
1046 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
1048 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
1049 bool &NeedsPadding)
const;
1050 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
1052 bool canExpandIndirectArgument(
QualType Ty)
const;
1062 bool &UsedInAlloca)
const;
1071 bool RetSmallStructInRegABI,
bool Win32StructABI,
1072 unsigned NumRegisterParameters,
bool SoftFloatABI)
1073 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1074 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1075 IsWin32StructABI(Win32StructABI),
1076 IsSoftFloatABI(SoftFloatABI),
1078 DefaultNumRegisterParameters(NumRegisterParameters) {}
1081 bool asReturnValue)
const override {
1089 bool isSwiftErrorInRegister()
const override {
1098 bool RetSmallStructInRegABI,
bool Win32StructABI,
1099 unsigned NumRegisterParameters,
bool SoftFloatABI)
1101 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1102 NumRegisterParameters, SoftFloatABI)) {}
1104 static bool isStructReturnInRegABI(
1107 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1120 StringRef Constraint,
1122 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1126 std::string &Constraints,
1127 std::vector<llvm::Type *> &ResultRegTypes,
1128 std::vector<llvm::Type *> &ResultTruncRegTypes,
1129 std::vector<LValue> &ResultRegDests,
1130 std::string &AsmString,
1131 unsigned NumOutputs)
const override;
1135 unsigned Sig = (0xeb << 0) |
1139 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1142 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
1143 return "movl\t%ebp, %ebp" 1144 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1159 unsigned NumNewOuts,
1160 std::string &AsmString) {
1162 llvm::raw_string_ostream OS(Buf);
1164 while (Pos < AsmString.size()) {
1165 size_t DollarStart = AsmString.find(
'$', Pos);
1166 if (DollarStart == std::string::npos)
1167 DollarStart = AsmString.size();
1168 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1169 if (DollarEnd == std::string::npos)
1170 DollarEnd = AsmString.size();
1171 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1173 size_t NumDollars = DollarEnd - DollarStart;
1174 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1176 size_t DigitStart = Pos;
1177 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1178 if (DigitEnd == std::string::npos)
1179 DigitEnd = AsmString.size();
1180 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1181 unsigned OperandIndex;
1182 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1183 if (OperandIndex >= FirstIn)
1184 OperandIndex += NumNewOuts;
1192 AsmString = std::move(OS.str());
1196 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1198 std::vector<llvm::Type *> &ResultRegTypes,
1199 std::vector<llvm::Type *> &ResultTruncRegTypes,
1200 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1201 unsigned NumOutputs)
const {
1206 if (!Constraints.empty())
1208 if (RetWidth <= 32) {
1209 Constraints +=
"={eax}";
1210 ResultRegTypes.push_back(CGF.
Int32Ty);
1213 Constraints +=
"=A";
1214 ResultRegTypes.push_back(CGF.
Int64Ty);
1219 ResultTruncRegTypes.push_back(CoerceTy);
1223 CoerceTy->getPointerTo()));
1224 ResultRegDests.push_back(ReturnSlot);
1231 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1237 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1243 if (Size == 64 || Size == 128)
1258 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1262 if (!RT)
return false;
1274 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1283 Ty = CTy->getElementType();
1293 return Size == 32 || Size == 64;
1298 for (
const auto *FD : RD->
fields()) {
1308 if (FD->isBitField())
1333 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1340 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1341 if (!IsWin32StructABI) {
1344 if (!CXXRD->isCLike())
1348 if (CXXRD->isDynamicClass())
1365 if (State.FreeRegs) {
1374 CCState &State)
const {
1379 uint64_t NumElts = 0;
1380 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1381 State.CC == llvm::CallingConv::X86_RegCall) &&
1389 if (IsDarwinVectorABI) {
1401 if ((Size == 8 || Size == 16 || Size == 32) ||
1402 (Size == 64 && VT->getNumElements() == 1))
1406 return getIndirectReturnResult(RetTy, State);
1415 if (RT->getDecl()->hasFlexibleArrayMember())
1416 return getIndirectReturnResult(RetTy, State);
1421 return getIndirectReturnResult(RetTy, State);
1429 if (shouldReturnTypeInRegister(RetTy,
getContext())) {
1438 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1439 || SeltTy->hasPointerRepresentation())
1447 return getIndirectReturnResult(RetTy, State);
1452 RetTy = EnumTy->getDecl()->getIntegerType();
1469 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1470 for (
const auto &I : CXXRD->bases())
1474 for (
const auto *i : RD->
fields()) {
1487 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1488 unsigned Align)
const {
1491 if (Align <= MinABIStackAlignInBytes)
1495 if (!IsDarwinVectorABI) {
1497 return MinABIStackAlignInBytes;
1505 return MinABIStackAlignInBytes;
1509 CCState &State)
const {
1511 if (State.FreeRegs) {
1521 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1522 if (StackAlign == 0)
1527 bool Realign = TypeAlign > StackAlign;
1532 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1539 if (K == BuiltinType::Float || K == BuiltinType::Double)
1545 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1546 if (!IsSoftFloatABI) {
1547 Class C = classify(Ty);
1553 unsigned SizeInRegs = (Size + 31) / 32;
1555 if (SizeInRegs == 0)
1559 if (SizeInRegs > State.FreeRegs) {
1568 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1572 State.FreeRegs -= SizeInRegs;
1576 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1578 bool &NeedsPadding)
const {
1585 NeedsPadding =
false;
1588 if (!updateFreeRegs(Ty, State))
1594 if (State.CC == llvm::CallingConv::X86_FastCall ||
1595 State.CC == llvm::CallingConv::X86_VectorCall ||
1596 State.CC == llvm::CallingConv::X86_RegCall) {
1597 if (
getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1598 NeedsPadding =
true;
1606 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1607 if (!updateFreeRegs(Ty, State))
1613 if (State.CC == llvm::CallingConv::X86_FastCall ||
1614 State.CC == llvm::CallingConv::X86_VectorCall ||
1615 State.CC == llvm::CallingConv::X86_RegCall) {
1627 CCState &State)
const {
1637 return getIndirectResult(Ty,
false, State);
1647 uint64_t NumElts = 0;
1648 if (State.CC == llvm::CallingConv::X86_RegCall &&
1651 if (State.FreeSSERegs >= NumElts) {
1652 State.FreeSSERegs -= NumElts;
1657 return getIndirectResult(Ty,
false, State);
1664 return getIndirectResult(Ty,
true, State);
1671 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1672 bool NeedsPadding =
false;
1674 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1677 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1683 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1691 if (
getContext().getTypeSize(Ty) <= 4 * 32 &&
1692 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1694 State.CC == llvm::CallingConv::X86_FastCall ||
1695 State.CC == llvm::CallingConv::X86_VectorCall ||
1696 State.CC == llvm::CallingConv::X86_RegCall,
1699 return getIndirectResult(Ty,
true, State);
1705 if (IsDarwinVectorABI) {
1707 if ((Size == 8 || Size == 16 || Size == 32) ||
1708 (Size == 64 && VT->getNumElements() == 1))
1721 Ty = EnumTy->getDecl()->getIntegerType();
1723 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1736 void X86_32ABIInfo::computeVectorCallArgs(
CGFunctionInfo &FI, CCState &State,
1737 bool &UsedInAlloca)
const {
1751 uint64_t NumElts = 0;
1755 if (State.FreeSSERegs >= NumElts) {
1756 State.FreeSSERegs -= NumElts;
1768 uint64_t NumElts = 0;
1774 if (State.FreeSSERegs >= NumElts) {
1775 State.FreeSSERegs -= NumElts;
1776 I.info = getDirectX86Hva();
1778 I.info = getIndirectResult(Ty,
false, State);
1780 }
else if (!IsHva) {
1792 else if (State.CC == llvm::CallingConv::X86_FastCall)
1794 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1796 State.FreeSSERegs = 6;
1799 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1801 State.FreeSSERegs = 8;
1803 State.FreeRegs = DefaultNumRegisterParameters;
1810 if (State.FreeRegs) {
1821 bool UsedInAlloca =
false;
1822 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1823 computeVectorCallArgs(FI, State, UsedInAlloca);
1835 rewriteWithInAlloca(FI);
1845 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1852 StackOffset = FieldEnd.
alignTo(FieldAlign);
1853 if (StackOffset != FieldEnd) {
1854 CharUnits NumBytes = StackOffset - FieldEnd;
1856 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1857 FrameFields.push_back(Ty);
1882 llvm_unreachable(
"invalid enum");
1885 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1886 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1903 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1910 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1920 for (; I != E; ++I) {
1922 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1940 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1947 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1949 assert(Triple.getArch() == llvm::Triple::x86);
1951 switch (Opts.getStructReturnConvention()) {
1960 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1963 switch (Triple.getOS()) {
1964 case llvm::Triple::DragonFly:
1965 case llvm::Triple::FreeBSD:
1966 case llvm::Triple::OpenBSD:
1967 case llvm::Triple::Win32:
1974 void X86_32TargetCodeGenInfo::setTargetAttributes(
1976 if (GV->isDeclaration())
1978 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1979 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1980 llvm::Function *Fn = cast<llvm::Function>(GV);
1981 Fn->addFnAttr(
"stackrealign");
1983 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1984 llvm::Function *Fn = cast<llvm::Function>(GV);
1985 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1990 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2013 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
2040 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
2042 case X86AVXABILevel::AVX512:
2044 case X86AVXABILevel::AVX:
2049 llvm_unreachable(
"Unknown AVXLevel");
2074 static Class merge(Class Accum, Class Field);
2090 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
2116 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2117 bool isNamedArg)
const;
2121 unsigned IROffset,
QualType SourceTy,
2122 unsigned SourceOffset)
const;
2124 unsigned IROffset,
QualType SourceTy,
2125 unsigned SourceOffset)
const;
2141 unsigned &neededInt,
unsigned &neededSSE,
2142 bool isNamedArg)
const;
2145 unsigned &NeededSSE)
const;
2148 unsigned &NeededSSE)
const;
2150 bool IsIllegalVectorType(
QualType Ty)
const;
2157 bool honorsRevision0_98()
const {
2163 bool classifyIntegerMMXAsSSE()
const {
2165 if (
getContext().getLangOpts().getClangABICompat() <=
2170 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2172 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2180 bool Has64BitPointers;
2185 Has64BitPointers(CGT.
getDataLayout().getPointerSize(0) == 8) {
2189 unsigned neededInt, neededSSE;
2195 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2196 return (vectorTy->getBitWidth() > 128);
2208 bool has64BitPointers()
const {
2209 return Has64BitPointers;
2213 bool asReturnValue)
const override {
2216 bool isSwiftErrorInRegister()
const override {
2226 IsMingw64(
getTarget().getTriple().isWindowsGNUEnvironment()) {}
2235 return isX86VectorTypeForVectorCall(
getContext(), Ty);
2239 uint64_t NumMembers)
const override {
2241 return isX86VectorCallAggregateSmallEnough(NumMembers);
2245 bool asReturnValue)
const override {
2249 bool isSwiftErrorInRegister()
const override {
2255 bool IsVectorCall,
bool IsRegCall)
const;
2258 void computeVectorCallArgs(
CGFunctionInfo &FI,
unsigned FreeSSERegs,
2259 bool IsVectorCall,
bool IsRegCall)
const;
2269 const X86_64ABIInfo &getABIInfo()
const {
2288 StringRef Constraint,
2290 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2293 bool isNoProtoCallVariadic(
const CallArgList &args,
2302 bool HasAVXType =
false;
2303 for (CallArgList::const_iterator
2304 it = args.begin(), ie = args.end(); it != ie; ++it) {
2305 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2320 unsigned Sig = (0xeb << 0) |
2324 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2327 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2329 if (GV->isDeclaration())
2331 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2332 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2333 llvm::Function *Fn = cast<llvm::Function>(GV);
2334 Fn->addFnAttr(
"stackrealign");
2336 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2337 llvm::Function *Fn = cast<llvm::Function>(GV);
2338 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2344 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
2347 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2349 void getDependentLibraryOption(llvm::StringRef Lib,
2353 if (Lib.find(
" ") != StringRef::npos)
2354 Opt +=
"\"" + Lib.str() +
"\"";
2360 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2364 bool Quote = (Lib.find(
" ") != StringRef::npos);
2365 std::string ArgStr = Quote ?
"\"" :
"";
2367 if (!Lib.endswith_lower(
".lib") && !Lib.endswith_lower(
".a"))
2369 ArgStr += Quote ?
"\"" :
"";
2373 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2376 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2377 unsigned NumRegisterParameters)
2378 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2379 Win32StructABI, NumRegisterParameters,
false) {}
2381 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2384 void getDependentLibraryOption(llvm::StringRef Lib,
2386 Opt =
"/DEFAULTLIB:";
2387 Opt += qualifyWindowsLibrary(Lib);
2390 void getDetectMismatchOption(llvm::StringRef Name,
2391 llvm::StringRef
Value,
2393 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2397 static void addStackProbeTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2399 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2402 Fn->addFnAttr(
"stack-probe-size",
2405 Fn->addFnAttr(
"no-stack-arg-probe");
2409 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2411 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2412 if (GV->isDeclaration())
2414 addStackProbeTargetAttributes(D, GV, CGM);
2423 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2440 void getDependentLibraryOption(llvm::StringRef Lib,
2442 Opt =
"/DEFAULTLIB:";
2443 Opt += qualifyWindowsLibrary(Lib);
2446 void getDetectMismatchOption(llvm::StringRef Name,
2447 llvm::StringRef
Value,
2449 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2453 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2456 if (GV->isDeclaration())
2458 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2459 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2460 llvm::Function *Fn = cast<llvm::Function>(GV);
2461 Fn->addFnAttr(
"stackrealign");
2463 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2464 llvm::Function *Fn = cast<llvm::Function>(GV);
2465 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2469 addStackProbeTargetAttributes(D, GV, CGM);
2473 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2498 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2500 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2502 if (Hi == SSEUp && Lo != SSE)
2506 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2530 assert((Accum != Memory && Accum != ComplexX87) &&
2531 "Invalid accumulated classification during merge.");
2532 if (Accum == Field || Field == NoClass)
2534 if (Field == Memory)
2536 if (Accum == NoClass)
2540 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2541 Accum == X87 || Accum == X87Up)
2546 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2547 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2558 Class &Current = OffsetBase < 64 ? Lo : Hi;
2564 if (k == BuiltinType::Void) {
2566 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2569 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2571 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2573 }
else if (k == BuiltinType::LongDouble) {
2575 if (LDF == &llvm::APFloat::IEEEquad()) {
2578 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2581 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
2584 llvm_unreachable(
"unexpected long double representation!");
2593 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2604 if (Has64BitPointers) {
2611 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2612 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2613 if (EB_FuncPtr != EB_ThisAdj) {
2627 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2636 uint64_t EB_Lo = (OffsetBase) / 64;
2637 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2640 }
else if (Size == 64) {
2641 QualType ElementType = VT->getElementType();
2650 if (!classifyIntegerMMXAsSSE() &&
2661 if (OffsetBase && OffsetBase != 64)
2663 }
else if (Size == 128 ||
2664 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2692 else if (Size <= 128)
2700 if (LDF == &llvm::APFloat::IEEEquad())
2702 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2703 Current = ComplexX87;
2704 else if (LDF == &llvm::APFloat::IEEEdouble())
2707 llvm_unreachable(
"unexpected long double representation!");
2712 uint64_t EB_Real = (OffsetBase) / 64;
2714 if (Hi == NoClass && EB_Real != EB_Imag)
2734 if (OffsetBase %
getContext().getTypeAlign(AT->getElementType()))
2741 uint64_t ArraySize = AT->getSize().getZExtValue();
2748 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2751 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2752 Class FieldLo, FieldHi;
2753 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2754 Lo = merge(Lo, FieldLo);
2755 Hi = merge(Hi, FieldHi);
2756 if (Lo == Memory || Hi == Memory)
2760 postMerge(Size, Lo, Hi);
2761 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2791 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2792 for (
const auto &I : CXXRD->bases()) {
2793 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2794 "Unexpected base class!");
2796 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2803 Class FieldLo, FieldHi;
2806 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2807 Lo = merge(Lo, FieldLo);
2808 Hi = merge(Hi, FieldHi);
2809 if (Lo == Memory || Hi == Memory) {
2810 postMerge(Size, Lo, Hi);
2819 i != e; ++i, ++idx) {
2821 bool BitField = i->isBitField();
2824 if (BitField && i->isUnnamedBitfield())
2834 if (Size > 128 && (Size !=
getContext().getTypeSize(i->getType()) ||
2835 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2837 postMerge(Size, Lo, Hi);
2841 if (!BitField && Offset %
getContext().getTypeAlign(i->getType())) {
2843 postMerge(Size, Lo, Hi);
2853 Class FieldLo, FieldHi;
2859 assert(!i->isUnnamedBitfield());
2861 uint64_t Size = i->getBitWidthValue(
getContext());
2863 uint64_t EB_Lo = Offset / 64;
2864 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2867 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2872 FieldHi = EB_Hi ?
Integer : NoClass;
2875 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2876 Lo = merge(Lo, FieldLo);
2877 Hi = merge(Hi, FieldHi);
2878 if (Lo == Memory || Hi == Memory)
2882 postMerge(Size, Lo, Hi);
2892 Ty = EnumTy->getDecl()->getIntegerType();
2901 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2904 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2905 if (Size <= 64 || Size > LargestVector)
2913 unsigned freeIntRegs)
const {
2925 Ty = EnumTy->getDecl()->getIntegerType();
2959 if (freeIntRegs == 0) {
2964 if (Align == 8 && Size <= 64)
2981 if (isa<llvm::VectorType>(IRType) ||
2982 IRType->getTypeID() == llvm::Type::FP128TyID)
2987 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2990 return llvm::VectorType::get(llvm::Type::getDoubleTy(
getVMContext()),
3006 unsigned TySize = (unsigned)Context.
getTypeSize(Ty);
3007 if (TySize <= StartBit)
3011 unsigned EltSize = (unsigned)Context.
getTypeSize(AT->getElementType());
3012 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3015 for (
unsigned i = 0; i != NumElts; ++i) {
3017 unsigned EltOffset = i*EltSize;
3018 if (EltOffset >= EndBit)
break;
3020 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3022 EndBit-EltOffset, Context))
3034 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3035 for (
const auto &I : CXXRD->bases()) {
3036 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3037 "Unexpected base class!");
3039 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
3043 if (BaseOffset >= EndBit)
continue;
3045 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3047 EndBit-BaseOffset, Context))
3058 i != e; ++i, ++idx) {
3062 if (FieldOffset >= EndBit)
break;
3064 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3083 const llvm::DataLayout &TD) {
3085 if (IROffset == 0 && IRType->isFloatTy())
3089 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3090 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3091 unsigned Elt = SL->getElementContainingOffset(IROffset);
3092 IROffset -= SL->getElementOffset(Elt);
3097 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3099 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3100 IROffset -= IROffset/EltSize*EltSize;
3111 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3112 QualType SourceTy,
unsigned SourceOffset)
const {
3125 return llvm::VectorType::get(llvm::Type::getFloatTy(
getVMContext()), 2);
3146 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3147 QualType SourceTy,
unsigned SourceOffset)
const {
3150 if (IROffset == 0) {
3152 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3153 IRType->isIntegerTy(64))
3162 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3163 IRType->isIntegerTy(32) ||
3164 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3165 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3166 cast<llvm::IntegerType>(IRType)->getBitWidth();
3174 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3176 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(STy);
3177 if (IROffset < SL->getSizeInBytes()) {
3178 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3179 IROffset -= SL->getElementOffset(FieldIdx);
3181 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3182 SourceTy, SourceOffset);
3186 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3189 unsigned EltOffset = IROffset/EltSize*EltSize;
3190 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3196 unsigned TySizeInBytes =
3199 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
3204 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3215 const llvm::DataLayout &TD) {
3220 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3221 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3222 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3223 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
3235 if (Lo->isFloatTy())
3236 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3238 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3239 &&
"Invalid/unknown lo type");
3240 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3244 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3247 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3248 "Invalid x86-64 argument pair!");
3256 X86_64ABIInfo::Class Lo, Hi;
3257 classify(RetTy, 0, Lo, Hi,
true);
3260 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3261 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3270 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3271 "Unknown missing lo part");
3276 llvm_unreachable(
"Invalid classification for lo word.");
3281 return getIndirectReturnResult(RetTy);
3286 ResType = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3290 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3293 RetTy = EnumTy->getDecl()->getIntegerType();
3304 ResType = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3317 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3318 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(
getVMContext()),
3329 llvm_unreachable(
"Invalid classification for hi word.");
3336 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3341 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3352 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3353 ResType = GetByteVectorType(RetTy);
3364 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3381 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3387 X86_64ABIInfo::Class Lo, Hi;
3388 classify(Ty, 0, Lo, Hi, isNamedArg);
3392 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3393 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3404 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3405 "Unknown missing lo part");
3418 return getIndirectResult(Ty, freeIntRegs);
3422 llvm_unreachable(
"Invalid classification for lo word.");
3435 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3438 Ty = EnumTy->getDecl()->getIntegerType();
3452 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3466 llvm_unreachable(
"Invalid classification for hi word.");
3468 case NoClass:
break;
3473 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(Ty), 8, Ty, 8);
3495 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3496 ResType = GetByteVectorType(Ty);
3510 X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
3511 unsigned &NeededSSE)
const {
3513 assert(RT &&
"classifyRegCallStructType only valid with struct types");
3515 if (RT->getDecl()->hasFlexibleArrayMember())
3516 return getIndirectReturnResult(Ty);
3519 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3520 if (CXXRD->isDynamicClass()) {
3521 NeededInt = NeededSSE = 0;
3522 return getIndirectReturnResult(Ty);
3525 for (
const auto &I : CXXRD->bases())
3526 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3528 NeededInt = NeededSSE = 0;
3529 return getIndirectReturnResult(Ty);
3534 for (
const auto *FD : RT->getDecl()->fields()) {
3535 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3536 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3538 NeededInt = NeededSSE = 0;
3539 return getIndirectReturnResult(Ty);
3542 unsigned LocalNeededInt, LocalNeededSSE;
3544 LocalNeededSSE,
true)
3546 NeededInt = NeededSSE = 0;
3547 return getIndirectReturnResult(Ty);
3549 NeededInt += LocalNeededInt;
3550 NeededSSE += LocalNeededSSE;
3558 unsigned &NeededInt,
3559 unsigned &NeededSSE)
const {
3564 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3573 if (CallingConv == llvm::CallingConv::Win64) {
3574 WinX86_64ABIInfo Win64ABIInfo(
CGT);
3575 Win64ABIInfo.computeInfo(FI);
3579 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3582 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3583 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3584 unsigned NeededInt, NeededSSE;
3590 classifyRegCallStructType(FI.
getReturnType(), NeededInt, NeededSSE);
3591 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3592 FreeIntRegs -= NeededInt;
3593 FreeSSERegs -= NeededSSE;
3622 it != ie; ++it, ++ArgNo) {
3623 bool IsNamedArg = ArgNo < NumRequiredArgs;
3625 if (IsRegCall && it->type->isStructureOrClassType())
3626 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3629 NeededSSE, IsNamedArg);
3635 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3636 FreeIntRegs -= NeededInt;
3637 FreeSSERegs -= NeededSSE;
3639 it->info = getIndirectResult(it->type, FreeIntRegs);
3665 llvm::PointerType::getUnqual(LTy));
3674 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3675 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3676 "overflow_arg_area.next");
3692 unsigned neededInt, neededSSE;
3700 if (!neededInt && !neededSSE)
3716 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3722 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3723 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3732 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3733 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3734 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3740 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3762 if (neededInt && neededSSE) {
3764 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3768 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3771 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3772 "Unexpected ABI info for mixed regs");
3773 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3774 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3777 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3778 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3797 }
else if (neededInt) {
3803 std::pair<CharUnits, CharUnits> SizeAlign =
3805 uint64_t TySize = SizeAlign.first.getQuantity();
3816 }
else if (neededSSE == 1) {
3821 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3840 RegAddrLo, ST->getStructElementType(0)));
3844 RegAddrHi, ST->getStructElementType(1)));
3888 WinX86_64ABIInfo::reclassifyHvaArgType(
QualType Ty,
unsigned &FreeSSERegs,
3891 const Type *
Base =
nullptr;
3892 uint64_t NumElts = 0;
3896 FreeSSERegs -= NumElts;
3897 return getDirectX86Hva();
3903 bool IsReturnType,
bool IsVectorCall,
3904 bool IsRegCall)
const {
3910 Ty = EnumTy->getDecl()->getIntegerType();
3913 uint64_t Width = Info.
Width;
3918 if (!IsReturnType) {
3928 const Type *
Base =
nullptr;
3929 uint64_t NumElts = 0;
3932 if ((IsVectorCall || IsRegCall) &&
3935 if (FreeSSERegs >= NumElts) {
3936 FreeSSERegs -= NumElts;
3942 }
else if (IsVectorCall) {
3943 if (FreeSSERegs >= NumElts &&
3945 FreeSSERegs -= NumElts;
3947 }
else if (IsReturnType) {
3960 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3967 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3975 switch (BT->getKind()) {
3976 case BuiltinType::Bool:
3981 case BuiltinType::LongDouble:
3986 if (LDF == &llvm::APFloat::x87DoubleExtended())
3991 case BuiltinType::Int128:
3992 case BuiltinType::UInt128:
4002 llvm::VectorType::get(llvm::Type::getInt64Ty(
getVMContext()), 2));
4013 unsigned FreeSSERegs,
4015 bool IsRegCall)
const {
4020 if (Count < VectorcallMaxParamNumAsReg)
4021 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4025 unsigned ZeroSSERegsAvail = 0;
4026 I.info = classify(I.type, ZeroSSERegsAvail,
false,
4027 IsVectorCall, IsRegCall);
4033 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4042 unsigned FreeSSERegs = 0;
4046 }
else if (IsRegCall) {
4053 IsVectorCall, IsRegCall);
4058 }
else if (IsRegCall) {
4064 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4067 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4075 bool IsIndirect =
false;
4081 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4093 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4094 bool IsSoftFloatABI;
4100 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4124 Ty = CTy->getElementType();
4132 const Type *AlignTy =
nullptr;
4149 if (
getTarget().getTriple().isOSDarwin()) {
4151 TI.second = getParamTypeAlignment(Ty);
4159 const unsigned OverflowLimit = 8;
4187 if (isInt || IsSoftFloatABI) {
4196 if (isI64 || (isF64 && IsSoftFloatABI)) {
4197 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4198 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4202 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
4208 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4211 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4225 if (!(isInt || IsSoftFloatABI)) {
4234 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
4242 Builder.CreateAdd(NumRegs,
4243 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4254 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4262 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
4273 if (Align > OverflowAreaAlign) {
4283 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4310 llvm::IntegerType *i8 = CGF.
Int8Ty;
4311 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4312 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4313 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4355 static const unsigned GPRBits = 64;
4358 bool IsSoftFloatABI;
4362 bool IsQPXVectorTy(
const Type *Ty)
const {
4367 unsigned NumElements = VT->getNumElements();
4368 if (NumElements == 1)
4371 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4374 }
else if (VT->getElementType()->
4375 isSpecificBuiltinType(BuiltinType::Float)) {
4384 bool IsQPXVectorTy(
QualType Ty)
const {
4392 IsSoftFloatABI(SoftFloatABI) {}
4394 bool isPromotableTypeForABI(
QualType Ty)
const;
4402 uint64_t Members)
const override;
4420 if (IsQPXVectorTy(T) ||
4436 bool asReturnValue)
const override {
4440 bool isSwiftErrorInRegister()
const override {
4449 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX,
4463 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
4465 PPC64TargetCodeGenInfo(
CodeGenTypes &
CGT) : DefaultTargetCodeGenInfo(CGT) {}
4481 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4484 Ty = EnumTy->getDecl()->getIntegerType();
4493 switch (BT->getKind()) {
4494 case BuiltinType::Int:
4495 case BuiltinType::UInt:
4509 Ty = CTy->getElementType();
4513 if (IsQPXVectorTy(Ty)) {
4524 const Type *AlignAsType =
nullptr;
4528 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4531 AlignAsType = EltType;
4535 const Type *
Base =
nullptr;
4536 uint64_t Members = 0;
4537 if (!AlignAsType &&
Kind == ELFv2 &&
4542 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4547 }
else if (AlignAsType) {
4566 uint64_t &Members)
const {
4568 uint64_t NElements = AT->getSize().getZExtValue();
4573 Members *= NElements;
4582 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4583 for (
const auto &I : CXXRD->bases()) {
4588 uint64_t FldMembers;
4592 Members += FldMembers;
4596 for (
const auto *FD : RD->
fields()) {
4601 if (AT->getSize().getZExtValue() == 0)
4603 FT = AT->getElementType();
4613 uint64_t FldMembers;
4618 std::max(Members, FldMembers) : Members + FldMembers);
4632 Ty = CT->getElementType();
4648 QualType EltTy = VT->getElementType();
4649 unsigned NumElements =
4664 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4668 if (BT->getKind() == BuiltinType::Float ||
4669 BT->getKind() == BuiltinType::Double ||
4670 BT->getKind() == BuiltinType::LongDouble ||
4672 (BT->getKind() == BuiltinType::Float128))) {
4685 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4686 const Type *
Base, uint64_t Members)
const {
4696 return Members * NumRegs <= 8;
4712 else if (Size < 128) {
4722 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4726 const Type *Base =
nullptr;
4727 uint64_t Members = 0;
4728 if (
Kind == ELFv2 &&
4731 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4740 if (Bits > 0 && Bits <= 8 * GPRBits) {
4745 if (Bits <= GPRBits)
4747 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4751 uint64_t RegBits = ABIAlign * 8;
4752 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4754 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4763 TyAlign > ABIAlign);
4784 else if (Size < 128) {
4792 const Type *Base =
nullptr;
4793 uint64_t Members = 0;
4794 if (
Kind == ELFv2 &&
4797 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4803 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4808 if (Bits > GPRBits) {
4809 CoerceTy = llvm::IntegerType::get(
getVMContext(), GPRBits);
4810 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4813 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4829 TypeInfo.second = getParamTypeAlignment(Ty);
4841 if (EltSize < SlotSize) {
4843 SlotSize * 2, SlotSize,
4850 SlotSize - EltSize);
4852 2 * SlotSize - EltSize);
4883 llvm::IntegerType *i8 = CGF.
Int8Ty;
4884 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4885 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4886 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4923 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4959 ABIKind getABIKind()
const {
return Kind; }
4960 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4966 uint64_t Members)
const override;
4968 bool isIllegalVectorType(
QualType Ty)
const;
4975 it.info = classifyArgumentType(it.type);
4986 return Kind == Win64 ?
EmitMSVAArg(CGF, VAListAddr, Ty)
4987 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4988 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4995 bool asReturnValue)
const override {
4998 bool isSwiftErrorInRegister()
const override {
5003 unsigned elts)
const override;
5011 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5012 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5019 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
5021 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5023 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5026 llvm::Function *Fn = cast<llvm::Function>(GV);
5030 Fn->addFnAttr(
"sign-return-address",
5036 Fn->addFnAttr(
"sign-return-address-key",
5037 Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
5043 Fn->addFnAttr(
"branch-target-enforcement");
5047 class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
5049 WindowsAArch64TargetCodeGenInfo(
CodeGenTypes &
CGT, AArch64ABIInfo::ABIKind K)
5050 : AArch64TargetCodeGenInfo(CGT, K) {}
5052 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5055 void getDependentLibraryOption(llvm::StringRef Lib,
5057 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5060 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5062 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5066 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5068 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5069 if (GV->isDeclaration())
5071 addStackProbeTargetAttributes(D, GV, CGM);
5079 if (isIllegalVectorType(Ty)) {
5092 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 2);
5097 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 4);
5106 Ty = EnumTy->getDecl()->getIntegerType();
5124 if (IsEmpty || Size == 0) {
5130 if (IsEmpty && Size == 0)
5136 const Type *Base =
nullptr;
5137 uint64_t Members = 0;
5147 if (
getTarget().isRenderScriptTarget()) {
5151 if (
Kind == AArch64ABIInfo::AAPCS) {
5153 Alignment = Alignment < 128 ? 64 : 128;
5157 Size = llvm::alignTo(Size, 64);
5161 if (Alignment < 128 && Size == 128) {
5182 RetTy = EnumTy->getDecl()->getIntegerType();
5193 const Type *Base =
nullptr;
5194 uint64_t Members = 0;
5203 if (
getTarget().isRenderScriptTarget()) {
5207 Size = llvm::alignTo(Size, 64);
5211 if (Alignment < 128 && Size == 128) {
5222 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
5225 unsigned NumElements = VT->getNumElements();
5228 if (!llvm::isPowerOf2_32(NumElements))
5230 return Size != 64 && (Size != 128 || NumElements == 1);
5235 bool AArch64ABIInfo::isLegalVectorTypeForSwift(
CharUnits totalSize,
5237 unsigned elts)
const {
5238 if (!llvm::isPowerOf2_32(elts))
5246 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5252 if (BT->isFloatingPoint())
5256 if (VecSize == 64 || VecSize == 128)
5262 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5263 uint64_t Members)
const {
5264 return Members <= 4;
5275 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5279 unsigned NumRegs = 1;
5280 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5281 BaseTy = ArrTy->getElementType();
5282 NumRegs = ArrTy->getNumElements();
5284 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5309 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
5318 RegSize = llvm::alignTo(RegSize, 8);
5327 RegSize = 16 * NumRegs;
5339 UsingStack = CGF.
Builder.CreateICmpSGE(
5340 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
5342 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5351 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
5354 reg_offs = CGF.
Builder.CreateAdd(
5355 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
5357 reg_offs = CGF.
Builder.CreateAnd(
5358 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
5367 NewOffset = CGF.
Builder.CreateAdd(
5368 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
5374 InRegs = CGF.
Builder.CreateICmpSLE(
5375 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
5377 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5389 reg_top_offset,
"reg_top_p");
5391 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
5399 MemTy = llvm::PointerType::getUnqual(MemTy);
5402 const Type *Base =
nullptr;
5403 uint64_t NumMembers = 0;
5405 if (IsHFA && NumMembers > 1) {
5410 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
5413 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5415 std::max(TyAlign, BaseTyInfo.second));
5420 BaseTyInfo.first.getQuantity() < 16)
5421 Offset = 16 - BaseTyInfo.first.getQuantity();
5423 for (
unsigned i = 0; i < NumMembers; ++i) {
5441 CharUnits SlotSize = BaseAddr.getAlignment();
5444 TyInfo.first < SlotSize) {
5468 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
5470 OnStackPtr = CGF.
Builder.CreateAdd(
5471 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
5473 OnStackPtr = CGF.
Builder.CreateAnd(
5474 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
5479 Address OnStackAddr(OnStackPtr,
5486 StackSize = StackSlotSize;
5488 StackSize = TyInfo.first.
alignTo(StackSlotSize);
5492 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
5498 TyInfo.first < StackSlotSize) {
5513 OnStackAddr, OnStackBlock,
"vaargs.addr");
5545 bool IsIndirect =
false;
5546 if (TyInfo.first.getQuantity() > 16) {
5547 const Type *Base =
nullptr;
5548 uint64_t Members = 0;
5553 TyInfo, SlotSize,
true);
5588 bool isEABI()
const {
5589 switch (
getTarget().getTriple().getEnvironment()) {
5590 case llvm::Triple::Android:
5591 case llvm::Triple::EABI:
5592 case llvm::Triple::EABIHF:
5593 case llvm::Triple::GNUEABI:
5594 case llvm::Triple::GNUEABIHF:
5595 case llvm::Triple::MuslEABI:
5596 case llvm::Triple::MuslEABIHF:
5603 bool isEABIHF()
const {
5604 switch (
getTarget().getTriple().getEnvironment()) {
5605 case llvm::Triple::EABIHF:
5606 case llvm::Triple::GNUEABIHF:
5607 case llvm::Triple::MuslEABIHF:
5614 ABIKind getABIKind()
const {
return Kind; }
5620 uint64_t Members)
const;
5622 bool isIllegalVectorType(
QualType Ty)
const;
5626 uint64_t Members)
const override;
5638 bool asReturnValue)
const override {
5641 bool isSwiftErrorInRegister()
const override {
5645 unsigned elts)
const override;
5653 const ARMABIInfo &getABIInfo()
const {
5661 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5662 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5674 unsigned getSizeOfUnwindException()
const override {
5675 if (getABIInfo().isEABI())
return 88;
5679 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5681 if (GV->isDeclaration())
5683 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5687 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
5692 switch (Attr->getInterrupt()) {
5693 case ARMInterruptAttr::Generic: Kind =
"";
break;
5694 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5695 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5696 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5697 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5698 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5701 llvm::Function *Fn = cast<llvm::Function>(GV);
5703 Fn->addFnAttr(
"interrupt", Kind);
5705 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5706 if (ABI == ARMABIInfo::APCS)
5712 llvm::AttrBuilder B;
5713 B.addStackAlignmentAttr(8);
5714 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5718 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5721 : ARMTargetCodeGenInfo(CGT, K) {}
5723 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5726 void getDependentLibraryOption(llvm::StringRef Lib,
5728 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5731 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5733 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5737 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5739 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5740 if (GV->isDeclaration())
5742 addStackProbeTargetAttributes(D, GV, CGM);
5766 if (isEABIHF() ||
getTarget().getTriple().isWatchABI())
5767 return llvm::CallingConv::ARM_AAPCS_VFP;
5769 return llvm::CallingConv::ARM_AAPCS;
5771 return llvm::CallingConv::ARM_APCS;
5777 switch (getABIKind()) {
5778 case APCS:
return llvm::CallingConv::ARM_APCS;
5779 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5780 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5781 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5783 llvm_unreachable(
"bad ABI kind");
5786 void ARMABIInfo::setCCs() {
5792 if (abiCC != getLLVMDefaultCC())
5803 if (Size == 64 || Size == 128) {
5813 uint64_t Members)
const {
5814 assert(Base &&
"Base class should be set for homogeneous aggregate");
5819 (VT->getElementType()->isFloat16Type() ||
5820 VT->getElementType()->isHalfType())) {
5822 llvm::Type *NewVecTy = llvm::VectorType::get(
5824 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
5832 bool isVariadic)
const {
5840 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5845 if (isIllegalVectorType(Ty))
5846 return coerceIllegalVector(Ty);
5853 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5862 Ty = EnumTy->getDecl()->getIntegerType();
5877 if (IsEffectivelyAAPCS_VFP) {
5880 const Type *Base =
nullptr;
5881 uint64_t Members = 0;
5883 return classifyHomogeneousAggregate(Ty, Base, Members);
5884 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5888 const Type *Base =
nullptr;
5889 uint64_t Members = 0;
5891 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5898 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5911 uint64_t ABIAlign = 4;
5913 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5914 getABIKind() == ARMABIInfo::AAPCS) {
5921 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5924 TyAlign > ABIAlign);
5929 if (
getTarget().isRenderScriptTarget()) {
5950 llvm::LLVMContext &VMContext) {
5982 if (!RT)
return false;
5993 bool HadField =
false;
5996 i != e; ++i, ++idx) {
6035 bool isVariadic)
const {
6036 bool IsEffectivelyAAPCS_VFP =
6037 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
6048 (VT->getElementType()->isFloat16Type() ||
6049 VT->getElementType()->isHalfType()))
6050 return coerceIllegalVector(RetTy);
6058 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
6067 RetTy = EnumTy->getDecl()->getIntegerType();
6074 if (getABIKind() == APCS) {
6107 if (IsEffectivelyAAPCS_VFP) {
6108 const Type *Base =
nullptr;
6109 uint64_t Members = 0;
6111 return classifyHomogeneousAggregate(RetTy, Base, Members);
6120 if (
getTarget().isRenderScriptTarget()) {
6133 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6136 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6144 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
6150 (VT->getElementType()->isFloat16Type() ||
6151 VT->getElementType()->isHalfType()))
6159 unsigned NumElements = VT->getNumElements();
6161 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6165 unsigned NumElements = VT->getNumElements();
6168 if (!llvm::isPowerOf2_32(NumElements))
6177 bool ARMABIInfo::isLegalVectorTypeForSwift(
CharUnits vectorSize,
6179 unsigned numElts)
const {
6180 if (!llvm::isPowerOf2_32(numElts))
6182 unsigned size =
getDataLayout().getTypeStoreSizeInBits(eltTy);
6191 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
6195 if (BT->getKind() == BuiltinType::Float ||
6196 BT->getKind() == BuiltinType::Double ||
6197 BT->getKind() == BuiltinType::LongDouble)
6201 if (VecSize == 64 || VecSize == 128)
6207 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
6208 uint64_t Members)
const {
6209 return Members <= 4;
6224 CharUnits TyAlignForABI = TyInfo.second;
6227 bool IsIndirect =
false;
6228 const Type *Base =
nullptr;
6229 uint64_t Members = 0;
6236 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6244 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6245 getABIKind() == ARMABIInfo::AAPCS) {
6248 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6255 TyInfo.second = TyAlignForABI;
6267 class NVPTXABIInfo :
public ABIInfo {
6284 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6286 bool shouldEmitStaticExternCAliases()
const override;
6291 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
6304 RetTy = EnumTy->getDecl()->getIntegerType();
6313 Ty = EnumTy->getDecl()->getIntegerType();
6338 llvm_unreachable(
"NVPTX does not support varargs");
6341 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6343 if (GV->isDeclaration())
6345 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6348 llvm::Function *F = cast<llvm::Function>(GV);
6354 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6357 addNVVMMetadata(F,
"kernel", 1);
6359 F->addFnAttr(llvm::Attribute::NoInline);
6368 if (FD->
hasAttr<CUDAGlobalAttr>()) {
6370 addNVVMMetadata(F,
"kernel", 1);
6372 if (CUDALaunchBoundsAttr *
Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
6374 llvm::APSInt MaxThreads(32);
6375 MaxThreads =
Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
6377 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
6382 if (
Attr->getMinBlocks()) {
6383 llvm::APSInt MinBlocks(32);
6384 MinBlocks =
Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
6387 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
6393 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6395 llvm::Module *M = F->getParent();
6399 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
6401 llvm::Metadata *MDVals[] = {
6402 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6403 llvm::ConstantAsMetadata::get(
6404 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6406 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6409 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
6427 bool isPromotableIntegerType(
QualType Ty)
const;
6428 bool isCompoundType(
QualType Ty)
const;
6429 bool isVectorArgumentType(
QualType Ty)
const;
6430 bool isFPArgumentType(
QualType Ty)
const;
6440 I.info = classifyArgumentType(I.type);
6447 bool asReturnValue)
const override {
6450 bool isSwiftErrorInRegister()
const override {
6463 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
6466 Ty = EnumTy->getDecl()->getIntegerType();
6474 switch (BT->getKind()) {
6475 case BuiltinType::Int:
6476 case BuiltinType::UInt:
6484 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
6490 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
6491 return (HasVector &&
6496 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
6498 switch (BT->getKind()) {
6499 case BuiltinType::Float:
6500 case BuiltinType::Double:
6515 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6516 for (
const auto &I : CXXRD->bases()) {
6525 Found = GetSingleElementType(Base);
6529 for (
const auto *FD : RD->
fields()) {
6541 Found = GetSingleElementType(FD->getType());
6572 bool InFPRs =
false;
6573 bool IsVector =
false;
6577 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6582 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6583 IsVector = ArgTy->isVectorTy();
6584 UnpaddedSize = TyInfo.first;
6585 DirectAlign = TyInfo.second;
6588 if (IsVector && UnpaddedSize > PaddedSize)
6590 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
6592 CharUnits Padding = (PaddedSize - UnpaddedSize);
6596 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
6604 "overflow_arg_area_ptr");
6614 "overflow_arg_area");
6622 unsigned MaxRegs, RegCountField, RegSaveIndex;
6633 RegPadding = Padding;
6640 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6647 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6654 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
6656 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
6659 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
6662 "reg_save_area_ptr");
6672 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6674 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6695 "overflow_arg_area");
6702 MemAddr, InMemBlock,
"va_arg.addr");
6714 if (isVectorArgumentType(RetTy))
6728 if (isPromotableIntegerType(Ty))
6735 QualType SingleElementTy = GetSingleElementType(Ty);
6736 if (isVectorArgumentType(SingleElementTy) &&
6737 getContext().getTypeSize(SingleElementTy) == Size)
6741 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6754 if (isFPArgumentType(SingleElementTy)) {
6755 assert(Size == 32 || Size == 64);
6766 if (isCompoundType(Ty))
6782 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6788 void MSP430TargetCodeGenInfo::setTargetAttributes(
6790 if (GV->isDeclaration())
6792 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6793 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
6798 llvm::Function *F = cast<llvm::Function>(GV);
6801 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6804 F->addFnAttr(llvm::Attribute::NoInline);
6805 F->addFnAttr(
"interrupt", llvm::utostr(InterruptAttr->getNumber()));
6815 class MipsABIInfo :
public ABIInfo {
6817 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6818 void CoerceToIntArgs(uint64_t TySize,
6825 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6826 StackAlignInBytes(IsO32 ? 8 : 16) {}
6837 unsigned SizeOfUnwindException;
6841 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6847 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6849 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6851 llvm::Function *Fn = cast<llvm::Function>(GV);
6853 if (FD->
hasAttr<MipsLongCallAttr>())
6854 Fn->addFnAttr(
"long-call");
6855 else if (FD->
hasAttr<MipsShortCallAttr>())
6856 Fn->addFnAttr(
"short-call");
6859 if (GV->isDeclaration())
6862 if (FD->
hasAttr<Mips16Attr>()) {
6863 Fn->addFnAttr(
"mips16");
6865 else if (FD->
hasAttr<NoMips16Attr>()) {
6866 Fn->addFnAttr(
"nomips16");
6869 if (FD->
hasAttr<MicroMipsAttr>())
6870 Fn->addFnAttr(
"micromips");
6871 else if (FD->
hasAttr<NoMicroMipsAttr>())
6872 Fn->addFnAttr(
"nomicromips");
6874 const MipsInterruptAttr *
Attr = FD->
getAttr<MipsInterruptAttr>();
6879 switch (Attr->getInterrupt()) {
6880 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6881 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6882 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6883 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6884 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6885 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6886 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6887 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6888 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6891 Fn->addFnAttr(
"interrupt", Kind);
6898 unsigned getSizeOfUnwindException()
const override {
6899 return SizeOfUnwindException;
6904 void MipsABIInfo::CoerceToIntArgs(
6906 llvm::IntegerType *IntTy =
6907 llvm::IntegerType::get(
getVMContext(), MinABIStackAlignInBytes * 8);
6910 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6911 ArgList.push_back(IntTy);
6914 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6917 ArgList.push_back(llvm::IntegerType::get(
getVMContext(), R));
6926 CoerceToIntArgs(TySize, ArgList);
6937 CoerceToIntArgs(TySize, ArgList);
6943 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6945 uint64_t LastOffset = 0;
6947 llvm::IntegerType *I64 = llvm::IntegerType::get(
getVMContext(), 64);
6952 i != e; ++i, ++idx) {
6956 if (!BT || BT->
getKind() != BuiltinType::Double)
6964 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6965 ArgList.push_back(I64);
6968 ArgList.push_back(llvm::Type::getDoubleTy(
getVMContext()));
6969 LastOffset = Offset + 64;
6972 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6973 ArgList.append(IntArgList.begin(), IntArgList.end());
6978 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6980 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6983 return llvm::IntegerType::get(
getVMContext(), (Offset - OrigOffset) * 8);
6990 uint64_t OrigOffset =
Offset;
6995 (uint64_t)StackAlignInBytes);
6996 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6997 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
7005 Offset = OrigOffset + MinABIStackAlignInBytes;
7014 getPaddingType(OrigOffset, CurrOffset));
7021 Ty = EnumTy->getDecl()->getIntegerType();
7025 return extendType(Ty);
7028 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
7032 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
7052 for (; b != e; ++b) {
7069 CoerceToIntArgs(Size, RTList);
7081 if (!IsO32 && Size == 0)
7105 RetTy = EnumTy->getDecl()->getIntegerType();
7135 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7137 bool DidPromote =
false;
7157 TyInfo, ArgSlotSize,
true);
7228 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7230 if (GV->isDeclaration())
7232 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7234 auto *Fn = cast<llvm::Function>(GV);
7236 if (FD->getAttr<AVRInterruptAttr>())
7237 Fn->addFnAttr(
"interrupt");
7239 if (FD->getAttr<AVRSignalAttr>())
7240 Fn->addFnAttr(
"signal");
7253 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
7256 : DefaultTargetCodeGenInfo(CGT) {}
7258 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7262 void TCETargetCodeGenInfo::setTargetAttributes(
7264 if (GV->isDeclaration())
7266 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7269 llvm::Function *F = cast<llvm::Function>(GV);
7272 if (FD->
hasAttr<OpenCLKernelAttr>()) {
7274 F->addFnAttr(llvm::Attribute::NoInline);
7275 const ReqdWorkGroupSizeAttr *
Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
7278 llvm::LLVMContext &Context = F->getContext();
7279 llvm::NamedMDNode *OpenCLMetadata =
7281 "opencl.kernel_wg_size_info");
7284 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7287 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7288 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7290 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7291 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7293 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7294 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7300 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7301 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7315 class HexagonABIInfo :
public ABIInfo {
7355 Ty = EnumTy->getDecl()->getIntegerType();
7393 RetTy = EnumTy->getDecl()->getIntegerType();
7433 class LanaiABIInfo :
public DefaultABIInfo {
7437 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
7460 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
7462 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7464 if (SizeInRegs == 0)
7467 if (SizeInRegs > State.FreeRegs) {
7472 State.FreeRegs -= SizeInRegs;
7478 CCState &State)
const {
7480 if (State.FreeRegs) {
7488 const unsigned MinABIStackAlignInBytes = 4;
7492 MinABIStackAlignInBytes);
7496 CCState &State)
const {
7502 return getIndirectResult(Ty,
false, State);
7511 return getIndirectResult(Ty,
true, State);
7519 if (SizeInRegs <= State.FreeRegs) {
7520 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7522 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7523 State.FreeRegs -= SizeInRegs;
7528 return getIndirectResult(Ty,
true, State);
7533 Ty = EnumTy->getDecl()->getIntegerType();
7535 bool InReg = shouldUseInReg(Ty, State);
7560 class AMDGPUABIInfo final :
public DefaultABIInfo {
7562 static const unsigned MaxNumRegsForArgsRet = 16;
7564 unsigned numRegsForType(
QualType Ty)
const;
7568 uint64_t Members)
const override;
7572 DefaultABIInfo(CGT) {}
7581 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
7585 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7586 const Type *Base, uint64_t Members)
const {
7590 return Members * NumRegs <= MaxNumRegsForArgsRet;
7594 unsigned AMDGPUABIInfo::numRegsForType(
QualType Ty)
const {
7595 unsigned NumRegs = 0;
7600 QualType EltTy = VT->getElementType();
7605 return (VT->getNumElements() + 1) / 2;
7607 unsigned EltNumRegs = (EltSize + 31) / 32;
7608 return EltNumRegs * VT->getNumElements();
7616 QualType FieldTy = Field->getType();
7617 NumRegs += numRegsForType(FieldTy);
7623 return (
getContext().getTypeSize(Ty) + 31) / 32;
7632 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7634 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7635 Arg.info = classifyKernelArgumentType(Arg.type);
7674 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7701 unsigned &NumRegsLeft)
const {
7702 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
7731 unsigned NumRegs = (Size + 31) / 32;
7732 NumRegsLeft -=
std::min(NumRegsLeft, NumRegs);
7745 if (NumRegsLeft > 0) {
7746 unsigned NumRegs = numRegsForType(Ty);
7747 if (NumRegsLeft >= NumRegs) {
7748 NumRegsLeft -= NumRegs;
7757 unsigned NumRegs = numRegsForType(Ty);
7758 NumRegsLeft -=
std::min(NumRegs, NumRegsLeft);
7768 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7770 unsigned getOpenCLKernelCallingConv()
const override;
7773 llvm::PointerType *T,
QualType QT)
const override;
7775 LangAS getASTAllocaAddressSpace()
const override {
7780 const VarDecl *D)
const override;
7782 llvm::LLVMContext &C)
const override;
7785 llvm::Function *BlockInvokeFunc,
7787 bool shouldEmitStaticExternCAliases()
const override;
7792 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7794 if (GV->isDeclaration())
7796 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7800 llvm::Function *F = cast<llvm::Function>(GV);
7803 FD->
getAttr<ReqdWorkGroupSizeAttr>() :
nullptr;
7806 (M.
getTriple().getOS() == llvm::Triple::AMDHSA))
7807 F->addFnAttr(
"amdgpu-implicitarg-num-bytes",
"48");
7809 const auto *FlatWGS = FD->
getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7810 if (ReqdWGS || FlatWGS) {
7811 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7812 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7813 if (ReqdWGS && Min == 0 && Max == 0)
7814 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7817 assert(Min <= Max &&
"Min must be less than or equal Max");
7819 std::string AttrVal = llvm::utostr(Min) +
"," + llvm::utostr(Max);
7820 F->addFnAttr(
"amdgpu-flat-work-group-size", AttrVal);
7822 assert(Max == 0 &&
"Max must be zero");
7825 if (
const auto *
Attr = FD->
getAttr<AMDGPUWavesPerEUAttr>()) {
7826 unsigned Min =
Attr->getMin();
7827 unsigned Max =
Attr->getMax();
7830 assert((Max == 0 || Min <= Max) &&
"Min must be less than or equal Max");
7832 std::string AttrVal = llvm::utostr(Min);
7834 AttrVal = AttrVal +
"," + llvm::utostr(Max);
7835 F->addFnAttr(
"amdgpu-waves-per-eu", AttrVal);
7837 assert(Max == 0 &&
"Max must be zero");
7840 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
7841 unsigned NumSGPR =
Attr->getNumSGPR();
7844 F->addFnAttr(
"amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7847 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
7848 uint32_t NumVGPR =
Attr->getNumVGPR();
7851 F->addFnAttr(
"amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7855 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7856 return llvm::CallingConv::AMDGPU_KERNEL;
7864 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7868 return llvm::ConstantPointerNull::get(PT);
7871 auto NPT = llvm::PointerType::get(PT->getElementType(),
7873 return llvm::ConstantExpr::getAddrSpaceCast(
7874 llvm::ConstantPointerNull::get(NPT), PT);
7878 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
7882 "Address space agnostic languages only");
7886 return DefaultGlobalAS;
7895 return ConstAS.getValue();
7897 return DefaultGlobalAS;
7901 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
SyncScope S,
7902 llvm::LLVMContext &C)
const {
7917 return C.getOrInsertSyncScopeID(Name);
7920 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
7926 FT = getABIInfo().getContext().adjustFunctionType(
7937 class SparcV8ABIInfo :
public DefaultABIInfo {
8000 class SparcV9ABIInfo :
public ABIInfo {
8021 struct CoerceBuilder {
8022 llvm::LLVMContext &Context;
8023 const llvm::DataLayout &DL;
8028 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
8029 : Context(c), DL(dl), Size(0), InReg(
false) {}
8032 void pad(uint64_t ToSize) {
8033 assert(ToSize >= Size &&
"Cannot remove elements");
8038 uint64_t Aligned = llvm::alignTo(Size, 64);
8039 if (Aligned > Size && Aligned <= ToSize) {
8040 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
8045 while (Size + 64 <= ToSize) {
8046 Elems.push_back(llvm::Type::getInt64Ty(Context));
8051 if (Size < ToSize) {
8052 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
8066 Elems.push_back(Ty);
8067 Size = Offset + Bits;
8071 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
8072 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
8073 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
8074 llvm::Type *ElemTy = StrTy->getElementType(i);
8075 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
8076 switch (ElemTy->getTypeID()) {
8077 case llvm::Type::StructTyID:
8078 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
8080 case llvm::Type::FloatTyID:
8081 addFloat(ElemOffset, ElemTy, 32);
8083 case llvm::Type::DoubleTyID:
8084 addFloat(ElemOffset, ElemTy, 64);
8086 case llvm::Type::FP128TyID:
8087 addFloat(ElemOffset, ElemTy, 128);
8089 case llvm::Type::PointerTyID:
8090 if (ElemOffset % 64 == 0) {
8092 Elems.push_back(ElemTy);
8103 bool isUsableType(llvm::StructType *Ty)
const {
8104 return llvm::makeArrayRef(Elems) == Ty->elements();
8109 if (Elems.size() == 1)
8110 return Elems.front();
8112 return llvm::StructType::get(Context, Elems);
8127 if (Size > SizeLimit)
8132 Ty = EnumTy->getDecl()->getIntegerType();
8135 if (Size < 64 && Ty->isIntegerType())
8149 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(
CGT.
ConvertType(Ty));
8154 CB.addStruct(0, StrTy);
8155 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8158 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8177 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8187 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8219 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
8251 llvm::IntegerType *i8 = CGF.
Int8Ty;
8252 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8253 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8280 class ARCABIInfo :
public DefaultABIInfo {
8282 using DefaultABIInfo::DefaultABIInfo;
8289 if (!State.FreeRegs)
8295 if (sz < State.FreeRegs)
8296 State.FreeRegs -= sz;
8312 updateState(I.info, I.type, State);
8336 const unsigned MinABIStackAlignInBytes = 4;
8339 TypeAlign > MinABIStackAlignInBytes);
8350 uint8_t FreeRegs)
const {
8356 return getIndirectByRef(Ty, FreeRegs > 0);
8359 return getIndirectByValue(Ty);
8364 Ty = EnumTy->getDecl()->getIntegerType();
8366 auto SizeInRegs = llvm::alignTo(
getContext().getTypeSize(Ty), 32) / 32;
8371 return getIndirectByValue(Ty);
8379 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8381 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8383 return FreeRegs >= SizeInRegs ?
8400 auto RetSize = llvm::alignTo(
getContext().getTypeSize(RetTy), 32) / 32;
8402 return getIndirectByRef(RetTy,
true);
8473 class TypeStringCache {
8474 enum Status {NonRecursive, Recursive,
Incomplete, IncompleteUsed};
8478 std::string Swapped;
8481 std::map<const IdentifierInfo *, struct Entry> Map;
8482 unsigned IncompleteCount;
8483 unsigned IncompleteUsedCount;
8485 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8495 class FieldEncoding {
8499 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8500 StringRef
str() {
return Enc; }
8501 bool operator<(
const FieldEncoding &rhs)
const {
8502 if (HasName != rhs.HasName)
return HasName;
8503 return Enc < rhs.Enc;
8507 class XCoreABIInfo :
public DefaultABIInfo {
8515 mutable TypeStringCache TSC;
8519 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8539 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8540 AI.setCoerceToType(ArgTy);
8541 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8545 switch (AI.getKind()) {
8549 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8551 Val =
Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8559 ArgSize = ArgSize.
alignTo(SlotSize);
8584 std::string StubEnc) {
8588 assert( (E.Str.empty() || E.State == Recursive) &&
8589 "Incorrectly use of addIncomplete");
8590 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
8591 E.Swapped.swap(E.Str);
8592 E.Str.swap(StubEnc);
8601 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
8604 auto I = Map.find(ID);
8605 assert(I != Map.end() &&
"Entry not present");
8606 Entry &E = I->second;
8608 E.State == IncompleteUsed) &&
8609 "Entry must be an incomplete type");
8610 bool IsRecursive =
false;
8611 if (E.State == IncompleteUsed) {
8614 --IncompleteUsedCount;
8616 if (E.Swapped.empty())
8620 E.Swapped.swap(E.Str);
8622 E.State = Recursive;
8630 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
8632 if (!ID || IncompleteUsedCount)
8635 if (IsRecursive && !E.Str.empty()) {
8636 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8637 "This is not the same Recursive entry");
8643 assert(E.Str.empty() &&
"Entry already present");
8645 E.State = IsRecursive? Recursive : NonRecursive;
8654 auto I = Map.find(ID);
8657 Entry &E = I->second;
8658 if (E.State == Recursive && IncompleteCount)
8663 E.State = IncompleteUsed;
8664 ++IncompleteUsedCount;
8685 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8689 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
8690 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8691 llvm::MDString::get(Ctx, Enc.str())};
8692 llvm::NamedMDNode *MD =
8693 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
8694 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8707 unsigned getOpenCLKernelCallingConv()
const override;
8715 DefaultABIInfo SPIRABI(CGM.
getTypes());
8716 SPIRABI.computeInfo(FI);
8721 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
8722 return llvm::CallingConv::SPIR_KERNEL;
8727 TypeStringCache &TSC);
8735 TypeStringCache &TSC) {
8736 for (
const auto *Field : RD->
fields()) {
8739 Enc += Field->getName();
8741 if (Field->isBitField()) {
8743 llvm::raw_svector_ostream OS(Enc);
8744 OS << Field->getBitWidthValue(CGM.
getContext());
8747 if (!
appendType(Enc, Field->getType(), CGM, TSC))
8749 if (Field->isBitField())
8752 FE.emplace_back(!Field->getName().empty(), Enc);
8764 StringRef TypeString = TSC.lookupStr(ID);
8765 if (!TypeString.empty()) {
8771 size_t Start = Enc.size();
8779 bool IsRecursive =
false;
8786 std::string StubEnc(Enc.substr(Start).str());
8788 TSC.addIncomplete(ID, std::move(StubEnc));
8790 (void) TSC.removeIncomplete(ID);
8793 IsRecursive = TSC.removeIncomplete(ID);
8799 unsigned E = FE.size();
8800 for (
unsigned I = 0; I != E; ++I) {
8807 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8813 TypeStringCache &TSC,
8816 StringRef TypeString = TSC.lookupStr(ID);
8817 if (!TypeString.empty()) {
8822 size_t Start = Enc.size();
8831 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8833 SmallStringEnc EnumEnc;
8835 EnumEnc += I->getName();
8837 I->getInitVal().toString(EnumEnc);
8839 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8842 unsigned E = FE.size();
8843 for (
unsigned I = 0; I != E; ++I) {
8850 TSC.addIfComplete(ID, Enc.substr(Start),
false);
8858 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
8866 Enc += Table[Lookup];
8871 const char *EncType;
8873 case BuiltinType::Void:
8876 case BuiltinType::Bool:
8879 case BuiltinType::Char_U:
8882 case BuiltinType::UChar:
8885 case BuiltinType::SChar:
8888 case BuiltinType::UShort:
8891 case BuiltinType::Short:
8894 case BuiltinType::UInt:
8897 case BuiltinType::Int:
8900 case BuiltinType::ULong:
8903 case BuiltinType::Long:
8906 case BuiltinType::ULongLong:
8909 case BuiltinType::LongLong:
8912 case BuiltinType::Float:
8915 case BuiltinType::Double:
8918 case BuiltinType::LongDouble:
8931 TypeStringCache &TSC) {
8943 TypeStringCache &TSC, StringRef NoSizeEnc) {
8948 CAT->getSize().toStringUnsigned(Enc);
8964 TypeStringCache &TSC) {
8971 auto I = FPT->param_type_begin();
8972 auto E = FPT->param_type_end();
8981 if (FPT->isVariadic())
8984 if (FPT->isVariadic())
8998 TypeStringCache &TSC) {
9035 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
9038 return appendType(Enc, FD->getType(), CGM, TSC);
9041 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
9044 QualType QT = VD->getType().getCanonicalType();
9061 class RISCVABIInfo :
public DefaultABIInfo {
9064 static const int NumArgGPRs = 8;
9068 : DefaultABIInfo(CGT), XLen(XLen) {}
9075 int &ArgGPRsLeft)
const;
9102 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
9107 bool IsFixed = ArgNum < NumFixedArgs;
9114 int &ArgGPRsLeft)
const {
9115 assert(ArgGPRsLeft <= NumArgGPRs &&
"Arg GPR tracking underflow");
9133 bool MustUseStack =
false;
9137 int NeededArgGPRs = 1;
9138 if (!IsFixed && NeededAlign == 2 * XLen)
9139 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
9140 else if (Size > XLen && Size <= 2 * XLen)
9143 if (NeededArgGPRs > ArgGPRsLeft) {
9144 MustUseStack =
true;
9145 NeededArgGPRs = ArgGPRsLeft;
9148 ArgGPRsLeft -= NeededArgGPRs;
9153 Ty = EnumTy->getDecl()->getIntegerType();
9157 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
9158 return extendType(Ty);
9166 if (Size <= 2 * XLen) {
9174 }
else if (Alignment == 2 * XLen) {
9189 int ArgGPRsLeft = 2;
9207 std::pair<CharUnits, CharUnits> SizeAndAlign =
9211 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
9231 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
9233 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
9236 const auto *
Attr = FD->getAttr<RISCVInterruptAttr>();
9241 switch (
Attr->getInterrupt()) {
9242 case RISCVInterruptAttr::user: Kind =
"user";
break;
9243 case RISCVInterruptAttr::supervisor: Kind =
"supervisor";
break;
9244 case RISCVInterruptAttr::machine: Kind =
"machine";
break;
9247 auto *Fn = cast<llvm::Function>(GV);
9249 Fn->addFnAttr(
"interrupt", Kind);
9259 return getTriple().supportsCOMDAT();
9263 if (TheTargetCodeGenInfo)
9264 return *TheTargetCodeGenInfo;
9268 this->TheTargetCodeGenInfo.reset(
P);
9273 switch (Triple.getArch()) {
9275 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
9277 case llvm::Triple::le32:
9278 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9279 case llvm::Triple::mips:
9280 case llvm::Triple::mipsel:
9281 if (Triple.getOS() == llvm::Triple::NaCl)
9282 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9283 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
9285 case llvm::Triple::mips64:
9286 case llvm::Triple::mips64el:
9287 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
9289 case llvm::Triple::avr:
9290 return SetCGInfo(
new AVRTargetCodeGenInfo(Types));
9292 case llvm::Triple::aarch64:
9293 case llvm::Triple::aarch64_be: {
9294 AArch64ABIInfo::ABIKind
Kind = AArch64ABIInfo::AAPCS;
9295 if (
getTarget().getABI() ==
"darwinpcs")
9296 Kind = AArch64ABIInfo::DarwinPCS;
9297 else if (Triple.isOSWindows())
9299 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
9301 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
9304 case llvm::Triple::wasm32:
9305 case llvm::Triple::wasm64:
9306 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
9308 case llvm::Triple::arm:
9309 case llvm::Triple::armeb:
9310 case llvm::Triple::thumb:
9311 case llvm::Triple::thumbeb: {
9312 if (Triple.getOS() == llvm::Triple::Win32) {
9314 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
9317 ARMABIInfo::ABIKind
Kind = ARMABIInfo::AAPCS;
9319 if (ABIStr ==
"apcs-gnu")
9320 Kind = ARMABIInfo::APCS;
9321 else if (ABIStr ==
"aapcs16")
9322 Kind = ARMABIInfo::AAPCS16_VFP;
9323 else if (CodeGenOpts.FloatABI ==
"hard" ||
9324 (CodeGenOpts.FloatABI !=
"soft" &&
9325 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
9326 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
9327 Triple.getEnvironment() == llvm::Triple::EABIHF)))
9328 Kind = ARMABIInfo::AAPCS_VFP;
9330 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
9333 case llvm::Triple::ppc:
9335 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI ==
"soft"));
9336 case llvm::Triple::ppc64:
9337 if (Triple.isOSBinFormatELF()) {
9338 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv1;
9340 Kind = PPC64_SVR4_ABIInfo::ELFv2;
9342 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9344 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9347 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
9348 case llvm::Triple::ppc64le: {
9349 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
9350 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv2;
9352 Kind = PPC64_SVR4_ABIInfo::ELFv1;
9354 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9356 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9360 case llvm::Triple::nvptx:
9361 case llvm::Triple::nvptx64:
9362 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
9364 case llvm::Triple::msp430:
9365 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
9367 case llvm::Triple::riscv32:
9368 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, 32));
9369 case llvm::Triple::riscv64:
9370 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, 64));
9372 case llvm::Triple::systemz: {
9374 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
9377 case llvm::Triple::tce:
9378 case llvm::Triple::tcele:
9379 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
9381 case llvm::Triple::x86: {
9382 bool IsDarwinVectorABI = Triple.isOSDarwin();
9383 bool RetSmallStructInRegABI =
9384 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
9385 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
9387 if (Triple.getOS() == llvm::Triple::Win32) {
9388 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
9389 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9390 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
9392 return SetCGInfo(
new X86_32TargetCodeGenInfo(
9393 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9394 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
9395 CodeGenOpts.FloatABI ==
"soft"));
9399 case llvm::Triple::x86_64: {
9403 ? X86AVXABILevel::AVX512
9406 switch (Triple.getOS()) {
9407 case llvm::Triple::Win32:
9408 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
9409 case llvm::Triple::PS4:
9410 return SetCGInfo(
new PS4TargetCodeGenInfo(Types, AVXLevel));
9412 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
9415 case llvm::Triple::hexagon:
9416 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
9417 case llvm::Triple::lanai:
9418 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
9419 case llvm::Triple::r600:
9420 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9421 case llvm::Triple::amdgcn:
9422 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9423 case llvm::Triple::sparc:
9424 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
9425 case llvm::Triple::sparcv9:
9426 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
9427 case llvm::Triple::xcore:
9428 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
9429 case llvm::Triple::arc:
9430 return SetCGInfo(
new ARCTargetCodeGenInfo(Types));
9431 case llvm::Triple::spir:
9432 case llvm::Triple::spir64:
9433 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
9444 llvm::Function *Invoke,
9446 auto *InvokeFT = Invoke->getFunctionType();
9448 for (
auto &
P : InvokeFT->params())
9449 ArgTys.push_back(
P);
9451 std::string Name = Invoke->getName().str() +
"_kernel";
9452 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9455 auto IP = CGF.
Builder.saveIP();
9458 Builder.SetInsertPoint(BB);
9460 for (
auto &A : F->args())
9462 Builder.CreateCall(Invoke, Args);
9463 Builder.CreateRetVoid();
9464 Builder.restoreIP(IP);
9476 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9482 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9483 auto *InvokeFT = Invoke->getFunctionType();
9492 ArgTys.push_back(BlockTy);
9493 ArgTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9494 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9495 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9496 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9497 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9498 ArgNames.push_back(llvm::MDString::get(C,
"block_literal"));
9499 for (
unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9500 ArgTys.push_back(InvokeFT->getParamType(I));
9501 ArgTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9502 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9503 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9504 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9505 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9507 llvm::MDString::get(C, (Twine(
"local_arg") + Twine(I)).
str()));
9509 std::string Name = Invoke->getName().str() +
"_kernel";
9510 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9513 F->addFnAttr(
"enqueued-block");
9514 auto IP = CGF.
Builder.saveIP();
9516 Builder.SetInsertPoint(BB);
9517 unsigned BlockAlign = CGF.
CGM.
getDataLayout().getPrefTypeAlignment(BlockTy);
9518 auto *BlockPtr = Builder.CreateAlloca(BlockTy,
nullptr);
9519 BlockPtr->setAlignment(BlockAlign);
9520 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9521 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9523 Args.push_back(Cast);
9524 for (
auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9526 Builder.CreateCall(Invoke, Args);
9527 Builder.CreateRetVoid();
9528 Builder.restoreIP(IP);
9530 F->setMetadata(
"kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9531 F->setMetadata(
"kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9532 F->setMetadata(
"kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9533 F->setMetadata(
"kernel_arg_base_type",
9534 llvm::MDNode::get(C, ArgBaseTypeNames));
9535 F->setMetadata(
"kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9537 F->setMetadata(
"kernel_arg_name", llvm::MDNode::get(C, ArgNames));
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Represents a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
ASTContext & getContext() const
Represents a variable declaration or definition.
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Address getAddress() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
Represents a member of a struct/union/class.
bool isReferenceType() const
CharUnits getTypeUnadjustedAlignInChars(QualType T) const
getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, in characters, before alignment adjustments.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
virtual bool hasLegalHalfType() const
Determine whether _Float16 is supported on this target.
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
const_arg_iterator arg_begin() const
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isFloat128Type() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
unsigned getTypeUnadjustedAlign(QualType T) const
Return the ABI-specified natural alignment of a (complete) type T, before alignment adjustments...
constexpr XRayInstrMask All
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
ExtInfo withCallingConv(CallingConv cc) const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of the given type from the given va_list address.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
llvm::CallingConv::ID RuntimeCC
static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
const TargetInfo & getTarget() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
SyncScope
Defines synch scope values used internally by clang.
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
bool isMemberFunctionPointerType() const
llvm::LLVMContext & getLLVMContext()
bool canPassInRegisters() const
Determine whether this class can be passed in registers.
constexpr XRayInstrMask None
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific code generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
StringRef getName() const
Return the actual identifier string.
const TargetInfo & getTarget() const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of the given type from the given va_list address.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
bool isFloat16Type() const
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA...
ExtInfo getExtInfo() const
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
virtual bool hasFloat128Type() const
Determine whether the __float128 type is supported on this target.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
ASTContext & getContext() const
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const
Get the syncscope used in LLVM IR.
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
__DEVICE__ int max(int __a, int __b)
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isPointerType() const
unsigned getNumRequiredArgs() const
__DEVICE__ int min(int __a, int __b)
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue reference.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
const LangOptions & getLangOpts() const
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
const CodeGenOptions & getCodeGenOpts() const
const llvm::Triple & getTriple() const