LLVM 17.0.0git
CallLowering.cpp
//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

/// Helper function which updates \p Flags when \p AttrFn returns true.
static void
addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
                    const std::function<bool(Attribute::AttrKind)> &AttrFn) {
  if (AttrFn(Attribute::SExt))
    Flags.setSExt();
  if (AttrFn(Attribute::ZExt))
    Flags.setZExt();
  if (AttrFn(Attribute::InReg))
    Flags.setInReg();
  if (AttrFn(Attribute::StructRet))
    Flags.setSRet();
  if (AttrFn(Attribute::Nest))
    Flags.setNest();
  if (AttrFn(Attribute::ByVal))
    Flags.setByVal();
  if (AttrFn(Attribute::Preallocated))
    Flags.setPreallocated();
  if (AttrFn(Attribute::InAlloca))
    Flags.setInAlloca();
  if (AttrFn(Attribute::Returned))
    Flags.setReturned();
  if (AttrFn(Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (AttrFn(Attribute::SwiftAsync))
    Flags.setSwiftAsync();
  if (AttrFn(Attribute::SwiftError))
    Flags.setSwiftError();
}
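
// Illustrative example (not in the upstream source): for a call such as
//   call void @f(i8 signext %x)
// the AttrFn callback supplied by getAttributesForArgIdx below returns true
// for Attribute::SExt on that parameter, so the resulting ArgFlagsTy reports
// isSExt() and the value is later sign-extended to its location type.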

ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
                                                     unsigned ArgIdx) const {
  ISD::ArgFlagsTy Flags;
  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
    return Call.paramHasAttr(ArgIdx, Attr);
  });
  return Flags;
}

ISD::ArgFlagsTy
CallLowering::getAttributesForReturn(const CallBase &Call) const {
  ISD::ArgFlagsTy Flags;
  addFlagsUsingAttrFn(Flags, [&Call](Attribute::AttrKind Attr) {
    return Call.hasRetAttr(Attr);
  });
  return Flags;
}

void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
                                             const AttributeList &Attrs,
                                             unsigned OpIdx) const {
  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
    return Attrs.hasAttributeAtIndex(OpIdx, Attr);
  });
}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  CallingConv::ID CallConv = CB.getCallingConv();
  Type *RetTy = CB.getType();
  bool IsVarArg = CB.getFunctionType()->isVarArg();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

  if (!Info.CanLowerReturn) {
    // Callee requires sret demotion.
    insertSRetOutgoingArgument(MIRBuilder, CB, Info);

    // The sret demotion isn't compatible with tail-calls, since the sret
    // argument points into the caller's stack frame.
    CanBeTailCalled = false;
  }


  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (const auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);

    // If we have an explicit sret argument that is an Instruction, (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Register ReturnHintAlignReg;
  Align ReturnHintAlign;

  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, getAttributesForReturn(CB)};

  if (!Info.OrigRet.Ty->isVoidTy()) {
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

    if (MaybeAlign Alignment = CB.getRetAlign()) {
      if (*Alignment > Align(1)) {
        ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
        Info.OrigRet.Regs[0] = ReturnHintAlignReg;
        ReturnHintAlign = *Alignment;
      }
    }
  }

  auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi);
  if (Bundle && CB.isIndirectCall()) {
    Info.CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
    assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
  }

  Info.CB = &CB;
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CallConv;
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = IsVarArg;
  if (!lowerCall(MIRBuilder, Info))
    return false;

  if (ReturnHintAlignReg && !Info.IsTailCall) {
    MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
                                ReturnHintAlign);
  }

  return true;
}
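
// Note: the overload above only gathers call-site information (arguments,
// callee, tail-call eligibility, KCFI type) into a CallLoweringInfo; the
// ABI-specific work happens in the target's virtual
// lowerCall(MIRBuilder, Info) override invoked near the end.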

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);

  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
  if (PtrTy) {
    Flags.setPointer();
    Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
  }

  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    assert(OpIdx >= AttributeList::FirstArgIndex);
    unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;

    Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
    assert(ElementTy && "Must have byval, inalloca or preallocated type");
    Flags.setByValSize(DL.getTypeAllocSize(ElementTy));

    // For ByVal, the alignment should be passed from the frontend. The backend
    // will guess if this info is not there, but there are cases it cannot get
    // right.
    if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
      MemAlign = *ParamAlign;
    else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
      MemAlign = *ParamAlign;
    else
      MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
  } else if (OpIdx >= AttributeList::FirstArgIndex) {
    if (auto ParamAlign =
            FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
      MemAlign = *ParamAlign;
  }
  Flags.setMemAlign(MemAlign);
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in x0.
  if (Flags.isSwiftSelf())
    Flags.setReturned(false);
}
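
// Illustrative example (not in the upstream source): for a parameter declared
//   void @f(ptr byval(%struct.S) align 16 %p)
// getParamAlign(ParamIdx) yields 16, so MemAlign becomes Align(16); with no
// explicit alignment, the target's getByValTypeAlignment() supplies a guess.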

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                     SmallVectorImpl<ArgInfo> &SplitArgs,
                                     const DataLayout &DL,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<uint64_t> *Offsets) const {
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);

  if (SplitVTs.size() == 0)
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.OrigArgIndex, OrigArg.Flags[0],
                           OrigArg.IsFixed, OrigArg.OrigValue);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false, DL);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
                           OrigArg.Flags[0], OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}
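
// Illustrative example: a {double, double} argument computes two EVTs, so it
// is split into two ArgInfos, one per virtual register, with
// setInConsecutiveRegs() applied when the target requires a register block.
// A [1 x double] argument computes a single EVT and is merely retyped to
// double, as the comment above notes.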

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder
mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getCoverTy(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  // We need to create an unmerge to the result registers, which may require
  // widening the original value.
  Register UnmergeSrcReg;
  if (LCMTy != PartLLT) {
    assert(DstRegs.size() == 1);
    return B.buildDeleteTrailingVectorElements(
        DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
  } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector e.g. s8 -> v4s8 -> s8
    assert(SrcRegs.size() == 1);
    UnmergeSrcReg = SrcRegs[0];
  }

  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  if (PadDstRegs.size() == 1)
    return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
}
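
// Illustrative example (the v3s16 case mentioned above): two v2s16 pieces
// cover a v3s16 result via LCMTy == v4s16; the pieces are merged to v4s16 and
// the extra trailing element is dropped with
// buildDeleteTrailingVectorElements to recover the original v3s16.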

/// Create a sequence of instructions to combine pieces split into register
/// typed values to the original IR value. \p OrigRegs contains the destination
/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
                              ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
                              const ISD::ArgFlagsTy Flags) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (PartLLT == LLTy) {
    // We should have avoided introducing a new virtual register, and just
    // directly assigned here.
    assert(OrigRegs[0] == Regs[0]);
    return;
  }

  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
      Regs.size() == 1) {
    B.buildBitcast(OrigRegs[0], Regs[0]);
    return;
  }

  // A vector PartLLT needs extending to LLTy's element size.
  // E.g. <2 x s64> = G_SEXT <2 x s32>.
  if (PartLLT.isVector() == LLTy.isVector() &&
      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
      (!PartLLT.isVector() ||
       PartLLT.getNumElements() == LLTy.getNumElements()) &&
      OrigRegs.size() == 1 && Regs.size() == 1) {
    Register SrcReg = Regs[0];

    LLT LocTy = MRI.getType(SrcReg);

    if (Flags.isSExt()) {
      SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    } else if (Flags.isZExt()) {
      SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    }

    // Sometimes pointers are passed zero extended.
    LLT OrigTy = MRI.getType(OrigRegs[0]);
    if (OrigTy.isPointer()) {
      LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
      B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
      return;
    }

    B.buildTrunc(OrigRegs[0], SrcReg);
    return;
  }

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMergeValues(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    SmallVector<Register> CastRegs(Regs.begin(), Regs.end());

    // If PartLLT is a mismatched vector in both number of elements and element
    // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
    // have the same elt type, i.e. v4s32.
    if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
        PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
        Regs.size() == 1) {
      LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
                      .changeElementCount(PartLLT.getElementCount() * 2);
      CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
      PartLLT = NewTy;
    }

    if (LLTy.getScalarType() == PartLLT.getElementType()) {
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    } else {
      unsigned I = 0;
      LLT GCDTy = getGCDType(LLTy, PartLLT);

      // We are both splitting a vector, and bitcasting its element types. Cast
      // the source pieces into the appropriate number of pieces with the result
      // element type.
      for (Register SrcReg : CastRegs)
        CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    }

    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge =
          B.buildMergeLikeInstr(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    // FIXME: Should handle floating point promotions.
    LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}
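
// Illustrative example: an incoming s64 value passed as two s32 location
// registers (LLTy == s64, PartLLT == s32, Regs.size() == 2) takes the
// all-scalar path above; SrcSize == 64 matches the original size, so a single
// G_MERGE_VALUES rebuilds the s64 virtual register.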

/// Create a sequence of instructions to expand the value in \p SrcReg (of type
/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
/// contain the type of scalar value extension if necessary.
///
/// This is used for outgoing values (vregs to physregs)
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            Register SrcReg, LLT SrcTy, LLT PartTy,
                            unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(SrcTy != PartTy && "identical part types shouldn't reach here");

  const unsigned PartSize = PartTy.getSizeInBits();

  if (PartTy.isVector() == SrcTy.isVector() &&
      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
    assert(DstRegs.size() == 1);
    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcTy.isVector() && PartTy.isVector() &&
      PartTy.getScalarSizeInBits() == SrcTy.getScalarSizeInBits() &&
      SrcTy.getNumElements() < PartTy.getNumElements()) {
    // A coercion like: v2f32 -> v4f32.
    Register DstReg = DstRegs.front();
    B.buildPadVectorWithUndefElements(DstReg, SrcReg);
    return;
  }

  LLT GCDTy = getGCDType(SrcTy, PartTy);
  if (GCDTy == PartTy) {
    // If this is already evenly divisible, we can create a simple unmerge.
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();
  LLT DstTy = MRI.getType(DstRegs[0]);
  LLT LCMTy = getCoverTy(SrcTy, PartTy);

  if (PartTy.isVector() && LCMTy == PartTy) {
    assert(DstRegs.size() == 1);
    B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
    return;
  }

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned CoveringSize = LCMTy.getSizeInBits();

  Register UnmergeSrc = SrcReg;

  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
    // For scalars, it's common to be able to use a simple extension.
    if (SrcTy.isScalar() && DstTy.isScalar()) {
      CoveringSize = alignTo(SrcSize, DstSize);
      LLT CoverTy = LLT::scalar(CoveringSize);
      UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
    } else {
      // Widen to the common type.
      // FIXME: This should respect the extend type
      Register Undef = B.buildUndef(SrcTy).getReg(0);
      SmallVector<Register, 8> MergeParts(1, SrcReg);
      for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
        MergeParts.push_back(Undef);
      UnmergeSrc = B.buildMergeLikeInstr(LCMTy, MergeParts).getReg(0);
    }
  }

  if (LCMTy.isVector() && CoveringSize != SrcSize)
    UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);

  B.buildUnmerge(DstRegs, UnmergeSrc);
}
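
// Illustrative example (outgoing direction): splitting an s64 source into two
// s32 parts satisfies getGCDType(s64, s32) == s32 == PartTy, so the value is
// handled by the simple G_UNMERGE_VALUES case above.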

bool CallLowering::determineAndHandleAssignments(
    ValueHandler &Handler, ValueAssigner &Assigner,
    SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
    CallingConv::ID CallConv, bool IsVarArg,
    ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;

  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, Args, CCInfo))
    return false;

  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
                           ThisReturnRegs);
}

static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
  if (Flags.isSExt())
    return TargetOpcode::G_SEXT;
  if (Flags.isZExt())
    return TargetOpcode::G_ZEXT;
  return TargetOpcode::G_ANYEXT;
}
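
// For instance, a parameter carrying the IR `signext` attribute selects
// G_SEXT here and `zeroext` selects G_ZEXT; anything else is widened with
// G_ANYEXT, which leaves the high bits unspecified.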

bool CallLowering::determineAssignments(ValueAssigner &Assigner,
                                        SmallVectorImpl<ArgInfo> &Args,
                                        CCState &CCInfo) const {
  LLVMContext &Ctx = CCInfo.getContext();
  const CallingConv::ID CallConv = CCInfo.getCallingConv();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);

    MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);

    if (NumParts == 1) {
      // Try to use the register type if we couldn't assign the VT.
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[0], CCInfo))
        return false;
      continue;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.

    // We're handling an incoming arg which is split over multiple regs.
    // E.g. passing an s128 on AArch64.
    ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
    Args[i].Flags.clear();

    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ISD::ArgFlagsTy Flags = OrigFlags;
      if (Part == 0) {
        Flags.setSplit();
      } else {
        Flags.setOrigAlign(Align(1));
        if (Part == NumParts - 1)
          Flags.setSplitEnd();
      }

      Args[i].Flags.push_back(Flags);
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[Part], CCInfo)) {
        // Still couldn't assign this smaller part type for some reason.
        return false;
      }
    }
  }

  return true;
}
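
// Worked example (from the comment above): an s128 argument on AArch64 is
// assigned as NumParts == 2 s64 pieces. Part 0 keeps the original flags plus
// setSplit(); part 1 has OrigAlign reset to 1 and, being last, gets
// setSplitEnd(). handleAssignments() later recombines the two location
// registers into the single wide virtual register.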

bool CallLowering::handleAssignments(ValueHandler &Handler,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  const unsigned NumArgs = Args.size();

  // Stores thunks for outgoing register assignments. This is used so we delay
  // generating register copies until mem loc assignments are done. We do this
  // so that if the target is using the delayed stack protector feature, we can
  // find the split point of the block accurately. E.g. if we have:
  // G_STORE %val, %memloc
  // $x0 = COPY %foo
  // $x1 = COPY %bar
  // CALL func
  // ... then the split point for the block will correctly be at, and including,
  // the copy to $x0. If instead the G_STORE instruction immediately precedes
  // the CALL, then we'd prematurely choose the CALL as the split point, thus
  // generating a split block with a CALL that uses undefined physregs.
  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;

  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");
    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      std::function<void()> Thunk;
      unsigned NumArgRegs = Handler.assignCustomValue(
          Args[i], ArrayRef(ArgLocs).slice(j), &Thunk);
      if (Thunk)
        DelayedOutgoingRegAssignments.emplace_back(Thunk);
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    const MVT ValVT = VA.getValVT();
    const MVT LocVT = VA.getLocVT();

    const LLT LocTy(LocVT);
    const LLT ValTy(ValVT);
    const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
    const EVT OrigVT = EVT::getEVT(Args[i].Ty);
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv
    const unsigned NumParts = Args[i].Flags.size();

    // Now split the registers into the assigned types.
    Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());

    if (NumParts != 1 || NewLLT != OrigTy) {
      // If we can't directly assign the register, we need one or more
      // intermediate values.
      Args[i].Regs.resize(NumParts);

      // For each split register, create and assign a vreg that will store
      // the incoming component of the larger value. These will later be
      // merged to form the final vreg.
      for (unsigned Part = 0; Part < NumParts; ++Part)
        Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
    }

    assert((j + (NumParts - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      ValTy, extendOpFromFlags(Args[i].Flags[0]));
    }

    bool BigEndianPartOrdering = TLI->hasBigEndianPartOrdering(OrigVT, DL);
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      unsigned Idx = BigEndianPartOrdering ? NumParts - 1 - Part : Part;
      CCValAssign &VA = ArgLocs[j + Idx];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // TODO: The memory size may be larger than the value we need to
        // store. We may need to adjust the offset for big endian targets.
        LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);

        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(
            MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);

        Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
        continue;
      }

      if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 &&
               "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
        continue;
      }

      assert(!VA.needsCustom() && "custom loc should have been handled already");

      if (i == 0 && !ThisReturnRegs.empty() &&
          Handler.isIncomingArgumentHandler() &&
          isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnRegs[Part], VA);
        continue;
      }

      if (Handler.isIncomingArgumentHandler())
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      else {
        DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
          Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
        });
      }
    }

    // Now that all pieces have been assigned, re-pack the register typed values
    // into the original value typed registers.
    if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        LocTy, Args[i].Flags[0]);
    }

    j += NumParts - 1;
  }
  for (auto &Fn : DelayedOutgoingRegAssignments)
    Fn();

  return true;
}

void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                   ArrayRef<Register> VRegs, Register DemoteReg,
                                   int FI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                        MRI.getType(VRegs[I]),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                    ArrayRef<Register> VRegs,
                                    Register DemoteReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  unsigned AS = DL.getAllocaAddrSpace();
  LLT OffsetLLTy =
      getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);

  MachinePointerInfo PtrInfo(AS);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                        MRI.getType(VRegs[I]),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetIncomingArgument(
    const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
    MachineRegisterInfo &MRI, const DataLayout &DL) const {
  unsigned AS = DL.getAllocaAddrSpace();
  DemoteReg = MRI.createGenericVirtualRegister(
      LLT::pointer(AS, DL.getPointerSizeInBits(AS)));

  Type *PtrTy = PointerType::get(F.getReturnType(), AS);

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);

  // NOTE: Assume that a pointer won't get split into more than one VT.
  assert(ValueVTs.size() == 1);

  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
                    ArgInfo::NoArgIndex);
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
  DemoteArg.Flags[0].setSRet();
  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
}

void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                              const CallBase &CB,
                                              CallLoweringInfo &Info) const {
  const DataLayout &DL = MIRBuilder.getDataLayout();
  Type *RetTy = CB.getType();
  unsigned AS = DL.getAllocaAddrSpace();
  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));

  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
      DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);

  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
                    ArgInfo::NoArgIndex);
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
  DemoteArg.Flags[0].setSRet();

  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
  Info.DemoteStackIndex = FI;
  Info.DemoteRegister = DemoteReg;
}
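
// Illustrative example (not in the upstream source): when a callee's
// aggregate return cannot be lowered in registers, the call is rewritten as
// if the IR had been
//   %tmp = alloca %big.struct
//   call void @callee(ptr sret(%big.struct) %tmp, ...)
// with DemoteReg holding the frame-index pointer that becomes the hidden
// first argument, and insertSRetLoads() later reading the results back.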

bool CallLowering::checkReturn(CCState &CCInfo,
                               SmallVectorImpl<BaseArgInfo> &Outs,
                               CCAssignFn *Fn) const {
  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
      return false;
  }
  return true;
}

void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
                                 AttributeList Attrs,
                                 SmallVectorImpl<BaseArgInfo> &Outs,
                                 const DataLayout &DL) const {
  LLVMContext &Context = RetTy->getContext();

  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);

  for (EVT VT : SplitVTs) {
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
    MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
    Type *PartTy = EVT(RegVT).getTypeForEVT(Context);

    for (unsigned I = 0; I < NumParts; ++I) {
      Outs.emplace_back(PartTy, Flags);
    }
  }
}

bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
  const auto &F = MF.getFunction();
  Type *ReturnType = F.getReturnType();
  CallingConv::ID CallConv = F.getCallingConv();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
                MF.getDataLayout());
  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
}

bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    const auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     ValueAssigner &CalleeAssigner,
                                     ValueAssigner &CallerAssigner) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

LLT CallLowering::ValueHandler::getStackValueStoreType(
    const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
  const MVT ValVT = VA.getValVT();
  if (ValVT != MVT::iPTR) {
    LLT ValTy(ValVT);

    // We lost the pointeriness going through CCValAssign, so try to restore it
    // based on the flags.
    if (Flags.isPointer()) {
      LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
                               ValTy.getScalarSizeInBits());
      if (ValVT.isVector())
        return LLT::vector(ValTy.getElementCount(), PtrTy);
      return PtrTy;
    }

    return ValTy;
  }

  unsigned AddrSpace = Flags.getPointerAddrSpace();
  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
}

void CallLowering::ValueHandler::copyArgumentMemory(
    const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
    const MachinePointerInfo &DstPtrInfo, Align DstAlign,
    const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
    CCValAssign &VA) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
      SrcPtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
      SrcAlign);

  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
      DstPtrInfo,
      MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
      MemSize, DstAlign);

  const LLT PtrTy = MRI.getType(DstPtr);
  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());

  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy{VA.getValVT()};

  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  const LLT ValRegTy = MRI.getType(ValReg);
  if (ValRegTy.isPointer()) {
    // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
    // we have to cast to do the extension.
    LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
    ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueAssigner::anchor() {}

Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
                                                                Register SrcReg,
                                                                LLT NarrowTy) {
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::ZExt: {
    return MIRBuilder
        .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  case CCValAssign::LocInfo::SExt: {
    return MIRBuilder
        .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
    break;
  }
  default:
    return SrcReg;
  }
}

/// Check if we can use a basic COPY instruction between the two types.
///
/// We're currently building on top of the infrastructure using MVT, which loses
/// pointer information in the CCValAssign. We accept copies from physical
/// registers that have been reported as integers if it's to an equivalent sized
/// pointer LLT.
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
  if (SrcTy == DstTy)
    return true;

  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
    return false;

  SrcTy = SrcTy.getScalarType();
  DstTy = DstTy.getScalarType();

  return (SrcTy.isPointer() && DstTy.isScalar()) ||
         (DstTy.isPointer() && SrcTy.isScalar());
}
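
// For example, on a 64-bit target a physical register whose location type was
// reported as s64 may be copied directly into a p0 virtual register, since
// both are 64 bits wide; a 32-bit source would fail the size check above.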

void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
                                                          Register PhysReg,
                                                          CCValAssign VA) {
  const MVT LocVT = VA.getLocVT();
  const LLT LocTy(LocVT);
  const LLT RegTy = MRI.getType(ValVReg);

  if (isCopyCompatibleType(RegTy, LocTy)) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    return;
  }

  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
  MIRBuilder.buildTrunc(ValVReg, Hint);
}