SwiftCallingConv.cpp
//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}
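
// (n & -n) isolates the lowest set bit, so the comparison holds exactly
// when at most one bit is set: 8 & -8 == 8, but 12 & -12 == 4 != 12.
// Note that n == 0 also passes this test.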

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}
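
// For example, merging i64 with i8* yields i64 (the integer type is
// preferred), and merging <2 x i32> with <2 x i8*> yields <2 x i32> via
// the recursive merge of the element types.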

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    // - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    // - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    // - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
                   begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    // - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}
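
// Worked example: a bit-field at bit offset 29 with width 6 occupies
// bits 29..34, so byteBegin = 29/8 = 3 and byteEnd = 34/8 + 1 = 5; the
// half-open byte range [3, 5) is added as opaque data.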

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap. The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    // Fill in each of the split entries, not just Entries[index];
    // otherwise the freshly inserted entries stay default-constructed.
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}
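
// Example: with unitSize = 8, offsets 0..7 map to 0 and offsets 8..15
// map to 8; the mask ~(8 - 1) simply clears the low three bits.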

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries share a chunk, make them both opaque
  // and stretch one to meet the next.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (areBytesInSameUnit(Entries[i - 1].End - CharUnits::One(),
                           Entries[i].Begin, chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}
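
// Worked example, assuming an 8-byte chunk (i.e. a 64-bit target):
// opaque entries [2, 3) and [5, 6) share the first chunk, so the first
// pass merges them into an opaque range [2, 6). The second pass then
// covers that range with the smallest aligned integer unit within the
// chunk that holds all of it, which here is [0, 8), producing a single
// i64 entry.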

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}
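
// Illustration: entries { i32 at [0, 4), float at [8, 12) } yield the
// coercion type { i32, [4 x i8], float } (the i8 array fills the hole)
// and the unpadded type { i32, float }, which lists just the components
// that are actually passed.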

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  CharUnits totalSize = Entries.back().End;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(totalSize,
                                                             Entries.back().Type,
                                                             asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(totalSize,
                                                           componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}
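
// Example: a 12-byte type has its store size rounded up to the next
// power of 2, so its natural alignment for Swift's purposes is 16.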

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
                           vectorTy->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = vectorTy->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::VectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}
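
// Example, for a hypothetical target where <2 x i32> is legal but
// <4 x i32> is not: splitting <4 x i32> returns {<2 x i32>, 2}; if the
// half-size vector were also illegal, it would return {i32, 4}, i.e.
// the scalar elements.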

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = origVectorTy->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs, llvm::VectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::VectorType::get(eltTy, numElts));
      return;
    }

    // Bring vecSize down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}
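
// Example trace, assuming a target where <4 x float> and <3 x float>
// are legal but <7 x float> is not: legalizing <7 x float> first emits
// one <4 x float> (the largest power-of-2 subvector), then notices the
// remaining three elements form the legal <3 x float> and stops,
// yielding { <4 x float>, <3 x float> }.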

bool swiftcall::shouldPassCXXRecordIndirectly(CodeGenModule &CGM,
                                              const CXXRecordDecl *record) {
  // Following a recommendation from Richard Smith, pass a C++ type
  // indirectly only if the destructor is non-trivial or *all* of the
  // copy/move constructors are deleted or non-trivial.

  if (record->hasNonTrivialDestructor())
    return true;

  // It would be nice if this were summarized on the CXXRecordDecl.
  for (auto ctor : record->ctors()) {
    if (ctor->isCopyOrMoveConstructor() && !ctor->isDeleted() &&
        ctor->isTrivial()) {
      return false;
    }
  }

  return true;
}
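
// For example, a class whose copy and move constructors are all deleted
// or non-trivial is passed indirectly, while a plain aggregate with a
// trivial copy constructor and trivial destructor can be expanded and
// passed in registers.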

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (auto cxxRecord = dyn_cast<CXXRecordDecl>(record)) {
      if (shouldPassCXXRecordIndirectly(CGM, cxxRecord))
        return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);
    }

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle. Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI.
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}
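
// Minimal usage sketch (illustrative, not part of this file), assuming
// a CodeGenModule &CGM and a CGFunctionInfo &FI for a function that
// uses the Swift calling convention:
//
//   // Fills in the return and argument ABIArgInfo slots in place.
//   swiftcall::computeABIInfo(CGM, FI);
//
// In clang itself, the call-arrangement code in CGCall.cpp dispatches
// to computeABIInfo instead of the target's generic computeInfo path
// when the function uses the Swift CC.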