LLVM 20.0.0git
M68kISelLowering.cpp
Go to the documentation of this file.
1//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the interfaces that M68k uses to lower LLVM code into a
11/// selection DAG.
12///
13//===----------------------------------------------------------------------===//
14
15#include "M68kISelLowering.h"
16#include "M68kCallingConv.h"
17#include "M68kMachineFunction.h"
18#include "M68kSubtarget.h"
19#include "M68kTargetMachine.h"
21
22#include "llvm/ADT/Statistic.h"
31#include "llvm/IR/CallingConv.h"
35#include "llvm/Support/Debug.h"
39
40using namespace llvm;
41
// Debug type used by LLVM_DEBUG / -debug-only=M68k-isel to filter this
// file's debug output.
42#define DEBUG_TYPE "M68k-isel"
43
// Statistic counter bumped in LowerCall for every lowered tail call
// (visible via -stats).
44STATISTIC(NumTailCalls, "Number of tail calls");
45
// Constructor of the M68k lowering object: registers the legal register
// classes (DR8/XR16/XR32), the stack pointer to save/restore, truncstore
// behavior, multiply/divide and overflow operation actions, and the atomic
// lowering strategy. NOTE(review): several original lines are not visible
// in this extract (gaps in the embedded numbering below).
47 const M68kSubtarget &STI)
48 : TargetLowering(TM), Subtarget(STI), TM(TM) {
49
50 MVT PtrVT = MVT::i32;
51
53
54 auto *RegInfo = Subtarget.getRegisterInfo();
55 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
56
57 // Set up the register classes.
58 addRegisterClass(MVT::i8, &M68k::DR8RegClass);
59 addRegisterClass(MVT::i16, &M68k::XR16RegClass);
60 addRegisterClass(MVT::i32, &M68k::XR32RegClass);
61
62 for (auto VT : MVT::integer_valuetypes()) {
66 }
67
68 // We don't accept any truncstore of integer registers.
69 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
70 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
71 setTruncStoreAction(MVT::i64, MVT::i8, Expand);
72 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
73 setTruncStoreAction(MVT::i32, MVT::i8, Expand);
74 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
75
// Operation legality below branches on the subtarget generation; the acted-on
// lines are not visible in this extract -- confirm against the full file.
78 if (Subtarget.atLeastM68020())
80 else
83
84 for (auto OP :
87 setOperationAction(OP, MVT::i8, Promote);
88 setOperationAction(OP, MVT::i16, Legal);
89 setOperationAction(OP, MVT::i32, LibCall);
90 }
91
92 for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
93 setOperationAction(OP, MVT::i8, Expand);
94 setOperationAction(OP, MVT::i16, Expand);
95 }
96
// Multiply-with-overflow is custom lowered for all integer widths.
97 for (auto OP : {ISD::SMULO, ISD::UMULO}) {
98 setOperationAction(OP, MVT::i8, Custom);
99 setOperationAction(OP, MVT::i16, Custom);
100 setOperationAction(OP, MVT::i32, Custom);
101 }
102
104 setOperationAction(OP, MVT::i32, Custom);
105
106 // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
107 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
112 }
113
114 // SADDO and friends are legal with this setup.
115 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
120 }
121
124
125 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
131 }
132
133 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
138 }
139
146
151
154
156
158
159 // We lower the `atomic-compare-and-swap` to `__sync_val_compare_and_swap`
160 // for subtarget < M68020
162 setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i8, MVT::i16, MVT::i32},
163 Subtarget.atLeastM68020() ? Legal : LibCall);
164
166
167 // M68k does not have native read-modify-write support, so expand all of them
168 // to `__sync_fetch_*` for target < M68020, otherwise expand to CmpxChg.
169 // See `shouldExpandAtomicRMWInIR` below.
171 {
183 },
184 {MVT::i8, MVT::i16, MVT::i32}, LibCall);
185
187}
188
// NOTE(review): the signature for this body is not visible in this extract.
// Judging from the atomic-RMW comment in the constructor above, this appears
// to be shouldExpandAtomicRMWInIR, selecting one expansion kind on >= 68020
// and another otherwise -- TODO confirm against the full file.
191 return Subtarget.atLeastM68020()
194}
195
// Returns data register D0. NOTE(review): the enclosing signature is not
// visible in this extract -- presumably the exception-pointer register hook;
// confirm against the full file.
198 return M68k::D0;
199}
200
// Returns data register D1. NOTE(review): the enclosing signature is not
// visible in this extract -- presumably the exception-selector register hook;
// confirm against the full file.
203 return M68k::D1;
204}
205
// Maps an inline-asm memory-constraint string to an InlineAsm::ConstraintCode
// via StringSwitch. The individual cases are not visible in this extract;
// the surviving comment shows 'U' reuses ConstraintCode::Um.
208 return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
210 // We borrow ConstraintCode::Um for 'U'.
213}
214
// Result type for SETCC nodes: always i8, regardless of the compared type,
// because the SETcc instruction materializes 0x00 or 0xFF.
216 LLVMContext &Context, EVT VT) const {
217 // M68k SETcc produces either 0x00 or 0xFF
218 return MVT::i8;
219}
220
// Shift-amount type: the value's own simple VT when it has one, otherwise
// an integer type as wide as a pointer in address space 0.
222 EVT Ty) const {
223 if (Ty.isSimple()) {
224 return Ty.getSimpleVT();
225 }
226 return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
227}
228
229#include "M68kGenCallingConv.inc"
230
232
/// Classifies how a call returns a struct -- not at all, in a register, or
/// through a hidden stack pointer -- from the sret/inreg flags on the first
/// outgoing argument.
233static StructReturnType
235 if (Outs.empty())
236 return NotStructReturn;
237
238 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
239 if (!Flags.isSRet())
240 return NotStructReturn;
241 if (Flags.isInReg())
242 return RegStructReturn;
243 return StackStructReturn;
244}
245
246/// Determines whether a function uses struct return semantics.
// Mirror of the call-side classification above, driven by the flags on the
// first incoming argument (Ins[0]) instead of the outgoing ones.
247static StructReturnType
249 if (Ins.empty())
250 return NotStructReturn;
251
252 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
253 if (!Flags.isSRet())
254 return NotStructReturn;
255 if (Flags.isInReg())
256 return RegStructReturn;
257 return StackStructReturn;
258}
259
260/// Make a copy of an aggregate at address specified by "Src" to address
261/// "Dst" with size and alignment information specified by the specific
262/// parameter attribute. The copy will be passed as a byval function parameter.
// AlwaysInline=true forces getMemcpy to expand into loads/stores instead of
// a memcpy libcall; the size node is a constant taken from the byval flags.
264 SDValue Chain, ISD::ArgFlagsTy Flags,
265 SelectionDAG &DAG, const SDLoc &DL) {
266 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
267
268 return DAG.getMemcpy(
269 Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
270 /*isVolatile=*/false, /*AlwaysInline=*/true,
271 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
272}
273
274/// Return true if the calling convention is one that we can guarantee TCO for.
275static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }
276
277/// Return true if we might ever do TCO for calls with this calling convention.
// C calls may be tail-called opportunistically; every other convention is
// allowed only when TCO can be guaranteed for it (see canGuaranteeTCO above,
// which currently always returns false). The signature line is not visible
// in this extract.
279 switch (CC) {
280 // C calling conventions:
281 case CallingConv::C:
282 return true;
283 default:
284 return canGuaranteeTCO(CC);
285 }
286}
287
288/// Return true if the function is being made into a tailcall target by
289/// changing its ABI.
290static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
291 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
292}
293
294/// Return true if the given stack call argument is already available in the
295/// same position (relatively) of the caller's incoming argument stack.
// NOTE(review): two parameter lines (Flags / MFI / MRI) and two interior
// condition lines are not visible in this extract.
296static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
299 const M68kInstrInfo *TII,
300 const CCValAssign &VA) {
301 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
302
303 for (;;) {
304 // Look through nodes that don't alter the bits of the incoming value.
305 unsigned Op = Arg.getOpcode();
307 Arg = Arg.getOperand(0);
308 continue;
309 }
// A truncate of an AssertZext back to the truncated type carries the same
// bits, so it can be looked through as well.
310 if (Op == ISD::TRUNCATE) {
311 const SDValue &TruncInput = Arg.getOperand(0);
312 if (TruncInput.getOpcode() == ISD::AssertZext &&
313 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
314 Arg.getValueType()) {
315 Arg = TruncInput.getOperand(0);
316 continue;
317 }
318 }
319 break;
320 }
321
// Resolve Arg back to a fixed stack object index (FI): through a
// CopyFromReg whose defining instruction reloads a stack slot (or LEAs a
// frame index for byval), through a direct load from a frame index, or a
// byval frame index itself.
322 int FI = INT_MAX;
323 if (Arg.getOpcode() == ISD::CopyFromReg) {
324 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
326 return false;
327 MachineInstr *Def = MRI->getVRegDef(VR);
328 if (!Def)
329 return false;
330 if (!Flags.isByVal()) {
331 if (!TII->isLoadFromStackSlot(*Def, FI))
332 return false;
333 } else {
334 unsigned Opcode = Def->getOpcode();
335 if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
336 Def->getOperand(1).isFI()) {
337 FI = Def->getOperand(1).getIndex();
338 Bytes = Flags.getByValSize();
339 } else
340 return false;
341 }
342 } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
343 if (Flags.isByVal())
344 // ByVal argument is passed in as a pointer but it's now being
345 // dereferenced. e.g.
346 // define @foo(%struct.X* %A) {
347 // tail call @bar(%struct.X* byval %A)
348 // }
349 return false;
350 SDValue Ptr = Ld->getBasePtr();
351 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
352 if (!FINode)
353 return false;
354 FI = FINode->getIndex();
355 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
356 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
357 FI = FINode->getIndex();
358 Bytes = Flags.getByValSize();
359 } else
360 return false;
361
// The object must be one of the caller's fixed incoming-argument slots with
// matching offset, extension flags and size.
362 assert(FI != INT_MAX);
363 if (!MFI.isFixedObjectIndex(FI))
364 return false;
365
366 if (Offset != MFI.getObjectOffset(FI))
367 return false;
368
369 if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
370 // If the argument location is wider than the argument type, check that any
371 // extension flags match.
372 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
373 Flags.isSExt() != MFI.isObjectSExt(FI)) {
374 return false;
375 }
376 }
377
378 return Bytes == MFI.getObjectSize(FI);
379}
380
// Returns (creating and caching in the function info on first use) the fixed
// frame object that holds this function's return address, located at offset
// -SlotSize. Two declaration lines (MF / FuncInfo) are not visible in this
// extract.
382M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
385 int ReturnAddrIndex = FuncInfo->getRAIndex();
386
387 if (ReturnAddrIndex == 0) {
388 // Set up a frame object for the return address.
389 unsigned SlotSize = Subtarget.getSlotSize();
390 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
391 SlotSize, -(int64_t)SlotSize, false);
392 FuncInfo->setRAIndex(ReturnAddrIndex);
393 }
394
395 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
396}
397
398SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
399 SDValue &OutRetAddr,
400 SDValue Chain,
401 bool IsTailCall, int FPDiff,
402 const SDLoc &DL) const {
403 EVT VT = getPointerTy(DAG.getDataLayout());
404 OutRetAddr = getReturnAddressFrameIndex(DAG);
405
406 // Load the "old" Return address.
407 OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
408 return SDValue(OutRetAddr.getNode(), 1);
409}
410
/// For a tail call that moves the stack (FPDiff != 0), store the previously
/// loaded return address \p RetFI into a new fixed slot at FPDiff - SlotSize;
/// no-op when FPDiff is zero. The MachinePointerInfo argument line of the
/// store is not visible in this extract.
411SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
412 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
413 EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
414 if (!FPDiff)
415 return Chain;
416
417 // Calculate the new stack slot for the return address.
418 int NewFO = MF.getFrameInfo().CreateFixedObject(
419 SlotSize, (int64_t)FPDiff - SlotSize, false);
420
421 SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
422 // Store the return address to the appropriate stack slot.
423 Chain = DAG.getStore(
424 Chain, DL, RetFI, NewFI,
426 return Chain;
427}
428
/// Materialize one stack-passed formal argument as a DAG value: creates a
/// fixed frame object at the argument's (big-endian adjusted) offset and,
/// for non-byval arguments, loads the value from it. ByVal arguments yield
/// the frame address itself. A few original lines (return type, one
/// parameter line, an if-condition, a MachinePointerInfo line) are not
/// visible in this extract.
430M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
432 const SDLoc &DL, SelectionDAG &DAG,
433 const CCValAssign &VA,
434 MachineFrameInfo &MFI,
435 unsigned ArgIdx) const {
436 // Create the nodes corresponding to a load from this parameter slot.
437 ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
438 EVT ValVT;
439
440 // If value is passed by pointer we have address passed instead of the value
441 // itself.
443 ValVT = VA.getLocVT();
444 else
445 ValVT = VA.getValVT();
446
447 // Because we are dealing with BE architecture we need to offset loading of
448 // partial types
449 int Offset = VA.getLocMemOffset();
450 if (VA.getValVT() == MVT::i8) {
451 Offset += 3;
452 } else if (VA.getValVT() == MVT::i16) {
453 Offset += 2;
454 }
455
456 // TODO Interrupt handlers
457 // Calculate SP offset of interrupt parameter, re-arrange the slot normally
458 // taken by a return address.
459
460 // FIXME For now, all byval parameter objects are marked mutable. This can
461 // be changed with more analysis. In case of tail call optimization mark all
462 // arguments mutable. Since they could be overwritten by lowering of arguments
463 // in case of a tail call.
464 bool AlwaysUseMutable = shouldGuaranteeTCO(
465 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
466 bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();
467
468 if (Flags.isByVal()) {
// ByVal: hand back the address of the fixed object; no load is emitted.
469 unsigned Bytes = Flags.getByValSize();
470 if (Bytes == 0)
471 Bytes = 1; // Don't create zero-sized stack objects.
472 int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
473 // TODO Interrupt handlers
474 // Adjust SP offset of interrupt parameter.
475 return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
476 } else {
477 int FI =
478 MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);
479
480 // Set SExt or ZExt flag.
481 if (VA.getLocInfo() == CCValAssign::ZExt) {
482 MFI.setObjectZExt(FI, true);
483 } else if (VA.getLocInfo() == CCValAssign::SExt) {
484 MFI.setObjectSExt(FI, true);
485 }
486
487 // TODO Interrupt handlers
488 // Adjust SP offset of interrupt parameter.
489
490 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
491 SDValue Val = DAG.getLoad(
492 ValVT, DL, Chain, FIN,
// Extended-in-location values are truncated back to their value type.
494 return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
495 : Val;
496 }
497}
498
/// Emit the store (or, for byval arguments, an inline copy) of one outgoing
/// stack argument at StackPtr + the location's memory offset; returns the
/// new chain. The store's MachinePointerInfo argument line is not visible in
/// this extract.
499SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
500 SDValue Arg, const SDLoc &DL,
501 SelectionDAG &DAG,
502 const CCValAssign &VA,
503 ISD::ArgFlagsTy Flags) const {
504 unsigned LocMemOffset = VA.getLocMemOffset();
505 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
506 PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
507 StackPtr, PtrOff);
508 if (Flags.isByVal())
509 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);
510
511 return DAG.getStore(
512 Chain, DL, Arg, PtrOff,
514}
515
516//===----------------------------------------------------------------------===//
517// Call
518//===----------------------------------------------------------------------===//
519
/// Lower an outgoing call: classifies the operands with CC_M68k, copies
/// register arguments and stores stack arguments, handles the sibcall /
/// musttail / tail-call cases (including relocating the return address by
/// FPDiff), then emits either an M68kISD::TC_RETURN node (tail call) or an
/// M68kISD::CALL followed by CALLSEQ_END and LowerCallResult.
/// NOTE(review): a number of original lines are not visible in this extract
/// (several local declarations, switch case labels, and operand lines).
520SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
521 SmallVectorImpl<SDValue> &InVals) const {
522 SelectionDAG &DAG = CLI.DAG;
523 SDLoc &DL = CLI.DL;
525 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
527 SDValue Chain = CLI.Chain;
528 SDValue Callee = CLI.Callee;
529 CallingConv::ID CallConv = CLI.CallConv;
530 bool &IsTailCall = CLI.IsTailCall;
531 bool IsVarArg = CLI.IsVarArg;
532
535 bool IsSibcall = false;
537 // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
538
// Interrupt handlers can only be entered by hardware, never called.
539 if (CallConv == CallingConv::M68k_INTR)
540 report_fatal_error("M68k interrupts may not be called directly");
541
542 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
543 if (Attr.getValueAsBool())
544 IsTailCall = false;
545
546 // FIXME Add tailcalls support
547
548 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
549 if (IsMustTail) {
550 // Force this to be a tail call. The verifier rules are enough to ensure
551 // that we can lower this successfully without moving the return address
552 // around.
553 IsTailCall = true;
554 } else if (IsTailCall) {
555 // Check if it's really possible to do a tail call.
556 IsTailCall = IsEligibleForTailCallOptimization(
557 Callee, CallConv, IsVarArg, SR != NotStructReturn,
558 MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
559 DAG);
560
561 // Sibcalls are automatically detected tailcalls which do not require
562 // ABI changes.
563 if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
564 IsSibcall = true;
565
566 if (IsTailCall)
567 ++NumTailCalls;
568 }
569
570 assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&
571 "Var args not supported with calling convention fastcc");
572
573 // Analyze operands of the call, assigning locations to each operand.
575 SmallVector<Type *, 4> ArgTypes;
576 for (const auto &Arg : CLI.getArgs())
577 ArgTypes.emplace_back(Arg.Ty);
578 M68kCCState CCInfo(ArgTypes, CallConv, IsVarArg, MF, ArgLocs,
579 *DAG.getContext());
580 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
581
582 // Get a count of how many bytes are to be pushed on the stack.
583 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
584 if (IsSibcall) {
585 // This is a sibcall. The memory operands are available in caller's
586 // own caller's stack.
587 NumBytes = 0;
588 } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
589 canGuaranteeTCO(CallConv)) {
590 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
591 }
592
593 int FPDiff = 0;
594 if (IsTailCall && !IsSibcall && !IsMustTail) {
595 // Lower arguments at fp - stackoffset + fpdiff.
596 unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();
597
598 FPDiff = NumBytesCallerPushed - NumBytes;
599
600 // Set the delta of movement of the returnaddr stackslot.
601 // But only set if delta is greater than previous delta.
602 if (FPDiff < MFI->getTCReturnAddrDelta())
603 MFI->setTCReturnAddrDelta(FPDiff);
604 }
605
606 unsigned NumBytesToPush = NumBytes;
607 unsigned NumBytesToPop = NumBytes;
608
609 // If we have an inalloca argument, all stack space has already been allocated
610 // for us and be right at the top of the stack. We don't support multiple
611 // arguments passed in memory when using inalloca.
612 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
613 NumBytesToPush = 0;
614 if (!ArgLocs.back().isMemLoc())
615 report_fatal_error("cannot use inalloca attribute on a register "
616 "parameter");
617 if (ArgLocs.back().getLocMemOffset() != 0)
618 report_fatal_error("any parameter with the inalloca attribute must be "
619 "the only memory argument");
620 }
621
622 if (!IsSibcall)
623 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
624 NumBytes - NumBytesToPush, DL);
625
626 SDValue RetFI;
627 // Load return address for tail calls.
628 if (IsTailCall && FPDiff)
629 Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);
630
632 SmallVector<SDValue, 8> MemOpChains;
634
635 // Walk the register/memloc assignments, inserting copies/loads. In the case
636 // of tail call optimization arguments are handle later.
637 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
638 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
639 ISD::ArgFlagsTy Flags = Outs[i].Flags;
640
641 // Skip inalloca arguments, they have already been written.
642 if (Flags.isInAlloca())
643 continue;
644
645 CCValAssign &VA = ArgLocs[i];
646 EVT RegVT = VA.getLocVT();
647 SDValue Arg = OutVals[i];
648 bool IsByVal = Flags.isByVal();
649
650 // Promote the value if needed.
// NOTE(review): the case labels of this switch are not visible in this
// extract; the actions correspond to SExt / ZExt / AExt / BCvt and an
// indirect (spill-to-stack) location.
651 switch (VA.getLocInfo()) {
652 default:
653 llvm_unreachable("Unknown loc info!");
655 break;
657 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
658 break;
660 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
661 break;
663 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
664 break;
666 Arg = DAG.getBitcast(RegVT, Arg);
667 break;
669 // Store the argument.
670 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
671 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
672 Chain = DAG.getStore(
673 Chain, DL, Arg, SpillSlot,
675 Arg = SpillSlot;
676 break;
677 }
678 }
679
680 if (VA.isRegLoc()) {
681 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
682 } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
683 assert(VA.isMemLoc());
684 if (!StackPtr.getNode()) {
685 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
687 }
688 MemOpChains.push_back(
689 LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
690 }
691 }
692
693 if (!MemOpChains.empty())
694 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
695
696 // FIXME Make sure PIC style GOT works as expected
697 // The only time GOT is really needed is for Medium-PIC static data
698 // otherwise we are happy with pc-rel or static references
699
700 if (IsVarArg && IsMustTail) {
701 const auto &Forwards = MFI->getForwardedMustTailRegParms();
702 for (const auto &F : Forwards) {
703 SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
704 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
705 }
706 }
707
708 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
709 // don't need this because the eligibility check rejects calls that require
710 // shuffling arguments passed in memory.
711 if (!IsSibcall && IsTailCall) {
712 // Force all the incoming stack arguments to be loaded from the stack
713 // before any new outgoing arguments are stored to the stack, because the
714 // outgoing stack slots may alias the incoming argument stack slots, and
715 // the alias isn't otherwise explicit. This is slightly more conservative
716 // than necessary, because it means that each store effectively depends
717 // on every argument instead of just those arguments it would clobber.
718 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
719
720 SmallVector<SDValue, 8> MemOpChains2;
721 SDValue FIN;
722 int FI = 0;
723 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
724 CCValAssign &VA = ArgLocs[i];
725 if (VA.isRegLoc())
726 continue;
727 assert(VA.isMemLoc());
728 SDValue Arg = OutVals[i];
729 ISD::ArgFlagsTy Flags = Outs[i].Flags;
730 // Skip inalloca arguments. They don't require any work.
731 if (Flags.isInAlloca())
732 continue;
733 // Create frame index.
734 int32_t Offset = VA.getLocMemOffset() + FPDiff;
735 uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
736 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
737 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
738
739 if (Flags.isByVal()) {
740 // Copy relative to framepointer.
742 if (!StackPtr.getNode()) {
743 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
745 }
748
749 MemOpChains2.push_back(
750 CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
751 } else {
752 // Store relative to framepointer.
753 MemOpChains2.push_back(DAG.getStore(
754 ArgChain, DL, Arg, FIN,
756 }
757 }
758
759 if (!MemOpChains2.empty())
760 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);
761
762 // Store the return address to the appropriate stack slot.
763 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
765 Subtarget.getSlotSize(), FPDiff, DL);
766 }
767
768 // Build a sequence of copy-to-reg nodes chained together with token chain
769 // and flag operands which copy the outgoing args into registers.
770 SDValue InGlue;
771 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
772 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
773 RegsToPass[i].second, InGlue);
774 InGlue = Chain.getValue(1);
775 }
776
777 if (Callee->getOpcode() == ISD::GlobalAddress) {
778 // If the callee is a GlobalAddress node (quite common, every direct call
779 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
780 // it.
781 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
782
783 // We should use extra load for direct calls to dllimported functions in
784 // non-JIT mode.
785 const GlobalValue *GV = G->getGlobal();
786 if (!GV->hasDLLImportStorageClass()) {
787 unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
788
790 GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
791
792 if (OpFlags == M68kII::MO_GOTPCREL) {
793
794 // Add a wrapper.
796 getPointerTy(DAG.getDataLayout()), Callee);
797
798 // Add extra indirection
799 Callee = DAG.getLoad(
800 getPointerTy(DAG.getDataLayout()), DL, DAG.getEntryNode(), Callee,
802 }
803 }
804 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
806 unsigned char OpFlags =
807 Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
808
810 S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
811 }
812
814
// For tail calls the call frame is torn down before emitting TC_RETURN.
815 if (!IsSibcall && IsTailCall) {
816 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, DL);
817 InGlue = Chain.getValue(1);
818 }
819
820 Ops.push_back(Chain);
821 Ops.push_back(Callee);
822
823 if (IsTailCall)
824 Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));
825
826 // Add argument registers to the end of the list so that they are known live
827 // into the call.
828 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
829 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
830 RegsToPass[i].second.getValueType()));
831
832 // Add a register mask operand representing the call-preserved registers.
833 const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
834 assert(Mask && "Missing call preserved mask for calling convention");
835
836 Ops.push_back(DAG.getRegisterMask(Mask));
837
838 if (InGlue.getNode())
839 Ops.push_back(InGlue);
840
841 if (IsTailCall) {
843 return DAG.getNode(M68kISD::TC_RETURN, DL, MVT::Other, Ops);
844 }
845
846 // Returns a chain & a flag for retval copy to use.
847 Chain = DAG.getNode(M68kISD::CALL, DL, {MVT::Other, MVT::Glue}, Ops);
848 InGlue = Chain.getValue(1);
849
850 // Create the CALLSEQ_END node.
851 unsigned NumBytesForCalleeToPop;
852 if (M68k::isCalleePop(CallConv, IsVarArg,
854 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
855 } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
856 // If this is a call to a struct-return function, the callee
857 // pops the hidden struct pointer, so we have to push it back.
858 NumBytesForCalleeToPop = 4;
859 } else {
860 NumBytesForCalleeToPop = 0; // Callee pops nothing.
861 }
862
863 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
864 // No need to reset the stack after the call if the call doesn't return. To
865 // make the MI verify, we'll pretend the callee does it for us.
866 NumBytesForCalleeToPop = NumBytes;
867 }
868
869 // Returns a flag for retval copy to use.
870 if (!IsSibcall) {
871 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
872 InGlue, DL);
873 InGlue = Chain.getValue(1);
874 }
875
876 // Handle result values, copying them out of physregs into vregs that we
877 // return.
878 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
879 InVals);
880}
881
/// Copy the values returned by a call out of the physical registers assigned
/// by RetCC_M68k into InVals, threading chain and glue through each
/// CopyFromReg. The RVLocs declaration line is not visible in this extract.
882SDValue M68kTargetLowering::LowerCallResult(
883 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
884 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
885 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
886
887 // Assign locations to each value returned by this call.
889 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
890 *DAG.getContext());
891 CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);
892
893 // Copy all of the result registers out of their specified physreg.
894 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
895 CCValAssign &VA = RVLocs[i];
896 EVT CopyVT = VA.getLocVT();
897
898 // Glued CopyFromReg: getValue(0) is the copied value, getValue(1) the
 // chain, getValue(2) the output glue consumed by the next copy.
899 Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InGlue)
900 .getValue(1);
901 SDValue Val = Chain.getValue(0);
902
903 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
904 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
905
906 InGlue = Chain.getValue(2);
907 InVals.push_back(Val);
908 }
909
910 return Chain;
911}
912
913//===----------------------------------------------------------------------===//
914// Formal Arguments Calling Convention Implementation
915//===----------------------------------------------------------------------===//
916
/// Lower the incoming (formal) arguments: assigns each argument a register
/// or stack location via CC_M68k, emits CopyFromReg / stack loads for them,
/// records the sret return register, the vararg frame index and the
/// callee-popped byte count in the machine function info.
/// NOTE(review): several original lines are not visible in this extract
/// (MF/MMFI declarations, two if-conditions, and a createVirtualRegister
/// line among them).
917SDValue M68kTargetLowering::LowerFormalArguments(
918 SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
919 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
920 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
923 // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();
924
925 MachineFrameInfo &MFI = MF.getFrameInfo();
926
927 // Assign locations to all of the incoming arguments.
929 SmallVector<Type *, 4> ArgTypes;
930 for (const Argument &Arg : MF.getFunction().args())
931 ArgTypes.emplace_back(Arg.getType());
932 M68kCCState CCInfo(ArgTypes, CCID, IsVarArg, MF, ArgLocs, *DAG.getContext());
933
934 CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);
935
936 unsigned LastVal = ~0U;
937 SDValue ArgValue;
938 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
939 CCValAssign &VA = ArgLocs[i];
940 assert(VA.getValNo() != LastVal && "Same value in different locations");
941 (void)LastVal;
942
943 LastVal = VA.getValNo();
944
945 if (VA.isRegLoc()) {
946 EVT RegVT = VA.getLocVT();
947 const TargetRegisterClass *RC;
948 if (RegVT == MVT::i32)
949 RC = &M68k::XR32RegClass;
950 else
951 llvm_unreachable("Unknown argument type!");
952
953 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
954 ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
955
956 // If this is an 8 or 16-bit value, it is really passed promoted to 32
957 // bits. Insert an assert[sz]ext to capture this, then truncate to the
958 // right size.
959 if (VA.getLocInfo() == CCValAssign::SExt) {
960 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
961 DAG.getValueType(VA.getValVT()));
962 } else if (VA.getLocInfo() == CCValAssign::ZExt) {
963 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
964 DAG.getValueType(VA.getValVT()));
965 } else if (VA.getLocInfo() == CCValAssign::BCvt) {
966 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
967 }
968
969 if (VA.isExtInLoc()) {
970 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
971 }
972 } else {
973 assert(VA.isMemLoc());
974 ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
975 }
976
977 // If value is passed via pointer - do a load.
978 // TODO Make sure this handling on indirect arguments is correct
980 ArgValue =
981 DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());
982
983 InVals.push_back(ArgValue);
984 }
985
986 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
987 // Swift calling convention does not require we copy the sret argument
988 // into %D0 for the return. We don't set SRetReturnReg for Swift.
989 if (CCID == CallingConv::Swift)
990 continue;
991
992 // ABI require that for returning structs by value we copy the sret argument
993 // into %D0 for the return. Save the argument into a virtual register so
994 // that we can access it from the return points.
995 if (Ins[i].Flags.isSRet()) {
996 unsigned Reg = MMFI->getSRetReturnReg();
997 if (!Reg) {
998 MVT PtrTy = getPointerTy(DAG.getDataLayout());
1000 MMFI->setSRetReturnReg(Reg);
1001 }
1002 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
1003 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
// Only one sret argument is possible, so stop at the first.
1004 break;
1005 }
1006 }
1007
1008 unsigned StackSize = CCInfo.getStackSize();
1009 // Align stack specially for tail calls.
1011 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
1012
1013 // If the function takes variable number of arguments, make a frame index for
1014 // the start of the first vararg value... for expansion of llvm.va_start. We
1015 // can skip this if there are no va_start calls.
1016 if (MFI.hasVAStart()) {
1017 MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
1018 }
1019
1020 if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
1021 // We forward some GPRs and some vector types.
1022 SmallVector<MVT, 2> RegParmTypes;
1023 MVT IntVT = MVT::i32;
1024 RegParmTypes.push_back(IntVT);
1025
1026 // Compute the set of forwarded registers. The rest are scratch.
1027 // NOTE(review): forwarding preserves a vararg function's incoming register
 // arguments so a musttail call inside it can pass them on -- confirm.
1030 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);
1031
1032 // Copy all forwards from physical to virtual registers.
1033 for (ForwardedRegister &F : Forwards) {
1034 // FIXME Can we use a less constrained schedule?
1035 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
1037 Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
1038 }
1039 }
1040
1041 // Some CCs need callee pop.
1042 if (M68k::isCalleePop(CCID, IsVarArg,
1044 MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
1045 } else {
1046 MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
1047 // If this is an sret function, the return should pop the hidden pointer.
1049 MMFI->setBytesToPopOnReturn(4);
1050 }
1051
1052 MMFI->setArgumentStackSize(StackSize);
1053
1054 return Chain;
1055}
1056
1057//===----------------------------------------------------------------------===//
1058// Return Value Calling Convention Implementation
1059//===----------------------------------------------------------------------===//
1060
// Checks with RetCC_M68k whether every value in Outs can be assigned a
// return location. The RVLocs declaration line is not visible in this
// extract.
1061bool M68kTargetLowering::CanLowerReturn(
1062 CallingConv::ID CCID, MachineFunction &MF, bool IsVarArg,
1063 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1065 CCState CCInfo(CCID, IsVarArg, MF, RVLocs, Context);
1066 return CCInfo.CheckReturn(Outs, RetCC_M68k);
1067}
1068
// Lower an IR 'ret' into a M68kISD::RET node: analyze the outgoing values
// with RetCC_M68k, copy each one into its assigned physical register (glued
// so the copies stay together), and emit the callee-popped byte count as
// operand #1. If an sret return register was recorded for this function,
// its value is additionally copied into %D0.
1069SDValue
1070M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
1071 bool IsVarArg,
1073 const SmallVectorImpl<SDValue> &OutVals,
1074 const SDLoc &DL, SelectionDAG &DAG) const {
1077
1079 CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
1080 CCInfo.AnalyzeReturn(Outs, RetCC_M68k);
1081
1082 SDValue Glue;
1084 // Operand #0 = Chain (updated below)
1085 RetOps.push_back(Chain);
1086 // Operand #1 = Bytes To Pop
1087 RetOps.push_back(
1088 DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32));
1089
1090 // Copy the result values into the output registers.
1091 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1092 CCValAssign &VA = RVLocs[i];
1093 assert(VA.isRegLoc() && "Can only return in registers!");
1094 SDValue ValToCopy = OutVals[i];
1095 EVT ValVT = ValToCopy.getValueType();
1096
1097 // Promote values to the appropriate types.
1098 if (VA.getLocInfo() == CCValAssign::SExt)
1099 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1100 else if (VA.getLocInfo() == CCValAssign::ZExt)
1101 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
1102 else if (VA.getLocInfo() == CCValAssign::AExt) {
 // i1 vector elements are sign-extended so each lane is all-ones/all-zero.
1103 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
1104 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1105 else
1106 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
1107 } else if (VA.getLocInfo() == CCValAssign::BCvt)
1108 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
1109
 // Glue successive register copies so scheduling cannot separate them
 // from each other or from the final RET.
1110 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Glue);
1111 Glue = Chain.getValue(1);
1112 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1113 }
1114
1115 // Swift calling convention does not require we copy the sret argument
1116 // into %d0 for the return, and SRetReturnReg is not set for Swift.
1117
1118 // ABI require that for returning structs by value we copy the sret argument
1119 // into %D0 for the return. Save the argument into a virtual register so that
1120 // we can access it from the return points.
1121 //
1122 // Checking Function.hasStructRetAttr() here is insufficient because the IR
1123 // may not have an explicit sret argument. If MFI.CanLowerReturn is
1124 // false, then an sret argument may be implicitly inserted in the SelDAG. In
1125 // either case MFI->setSRetReturnReg() will have been called.
1126 if (unsigned SRetReg = MFI->getSRetReturnReg()) {
1127 // ??? Can i just move this to the top and escape this explanation?
1128 // When we have both sret and another return value, we should use the
1129 // original Chain stored in RetOps[0], instead of the current Chain updated
1130 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
1131
1132 // For the case of sret and another return value, we have
1133 // Chain_0 at the function entry
1134 // Chain_1 = getCopyToReg(Chain_0) in the above loop
1135 // If we use Chain_1 in getCopyFromReg, we will have
1136 // Val = getCopyFromReg(Chain_1)
1137 // Chain_2 = getCopyToReg(Chain_1, Val) from below
1138
1139 // getCopyToReg(Chain_0) will be glued together with
1140 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
1141 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
1142 // Data dependency from Unit B to Unit A due to usage of Val in
1143 // getCopyToReg(Chain_1, Val)
1144 // Chain dependency from Unit A to Unit B
1145
1146 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
1147 SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
1149
1150 // ??? How will this work if CC does not use registers for args passing?
1151 // ??? What if I return multiple structs?
1152 unsigned RetValReg = M68k::D0;
1153 Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Glue);
1154 Glue = Chain.getValue(1);
1155
1156 RetOps.push_back(
1157 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
1158 }
1159
1160 RetOps[0] = Chain; // Update chain.
1161
1162 // Add the glue if we have it.
1163 if (Glue.getNode())
1164 RetOps.push_back(Glue);
1165
1166 return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
1167}
1168
1169//===----------------------------------------------------------------------===//
1170// Fast Calling Convention (tail call) implementation
1171//===----------------------------------------------------------------------===//
1172
// Like std call, the callee cleans up the arguments, except that a scratch
// address register (%a0/%a1 on M68k) is reserved for storing the tail-called
// function's address. Only two registers are free for argument passing
// (inreg). Tail call optimization is performed provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// With GOT-style position-independent code only local (within module) calls
// are supported at the moment. To keep the stack aligned according to the
// platform ABI the function GetAlignedArgumentStackSize ensures that the
// argument delta is always a multiple of the stack alignment. (Dynamic
// linkers need this - darwin's dyld for example.) If a tail-called callee
// has more arguments than the caller, the caller needs to make sure that
// there is room to move the RETADDR to. This is achieved by reserving an
// area the size of the argument delta right after the original RETADDR, but
// before the saved frame pointer or the spilled callee-saved registers,
// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
// stack layout:
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR
//     move area ]
//   (possible saved frame pointer)
//   callee-saved registers
//   local1 ..
1198
1199/// Make the stack size align e.g 16n + 12 aligned for a 16-byte align
1200/// requirement.
1201unsigned
1202M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1203 SelectionDAG &DAG) const {
1204 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
1205 unsigned StackAlignment = TFI.getStackAlignment();
1206 uint64_t AlignMask = StackAlignment - 1;
1207 int64_t Offset = StackSize;
1208 unsigned SlotSize = Subtarget.getSlotSize();
1209 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1210 // Number smaller than 12 so just add the difference.
1211 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1212 } else {
1213 // Mask out lower bits, add stackalignment once plus the 12 bytes.
1214 Offset =
1215 ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
1216 }
1217 return Offset;
1218}
1219
1220/// Check whether the call is eligible for tail call optimization. Targets
1221/// that want to do tail call optimization should implement this function.
/// The sibcall path below additionally requires no dynamic stack
/// realignment, no struct-return on either side, register-only varargs,
/// compatible result passing and preserved-register masks, matching stack
/// argument layout, and agreement on callee-popped bytes.
1222bool M68kTargetLowering::IsEligibleForTailCallOptimization(
1223 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
1224 bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
1226 const SmallVectorImpl<SDValue> &OutVals,
1227 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
1228 if (!mayTailCallThisCC(CalleeCC))
1229 return false;
1230
1231 // If -tailcallopt is specified, make fastcc functions tail-callable.
1233 const auto &CallerF = MF.getFunction();
1234
1235 CallingConv::ID CallerCC = CallerF.getCallingConv();
1236 bool CCMatch = CallerCC == CalleeCC;
1237
1239 if (canGuaranteeTCO(CalleeCC) && CCMatch)
1240 return true;
1241 return false;
1242 }
1243
1244 // Look for obvious safe cases to perform tail call optimization that do not
1245 // require ABI changes. This is what gcc calls sibcall.
1246
1247 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
1248 // emit a special epilogue.
1249 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1250 if (RegInfo->hasStackRealignment(MF))
1251 return false;
1252
1253 // Also avoid sibcall optimization if either caller or callee uses struct
1254 // return semantics.
1255 if (IsCalleeStructRet || IsCallerStructRet)
1256 return false;
1257
1258 // Do not sibcall optimize vararg calls unless all arguments are passed via
1259 // registers.
1260 LLVMContext &C = *DAG.getContext();
1261 if (IsVarArg && !Outs.empty()) {
1262
1264 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1265
1266 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1267 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1268 if (!ArgLocs[i].isRegLoc())
1269 return false;
1270 }
1271
1272 // Check that the call results are passed in the same way.
1273 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
1274 RetCC_M68k))
1275 return false;
1276
1277 // The callee has to preserve all registers the caller needs to preserve.
1278 const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
1279 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1280 if (!CCMatch) {
1281 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1282 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1283 return false;
1284 }
1285
1286 unsigned StackArgsSize = 0;
1287
1288 // If the callee takes no arguments then go on to check the results of the
1289 // call.
1290 if (!Outs.empty()) {
1291 // Check if stack adjustment is needed. For now, do not do this if any
1292 // argument is passed on the stack.
1294 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1295
1296 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1297 StackArgsSize = CCInfo.getStackSize();
1298
1299 if (StackArgsSize) {
1300 // Check if the arguments are already laid out in the right way as
1301 // the caller's fixed stack objects.
1302 MachineFrameInfo &MFI = MF.getFrameInfo();
1303 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1304 const M68kInstrInfo *TII = Subtarget.getInstrInfo();
1305 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1306 CCValAssign &VA = ArgLocs[i];
1307 SDValue Arg = OutVals[i];
1308 ISD::ArgFlagsTy Flags = Outs[i].Flags;
1310 return false;
1311 if (!VA.isRegLoc()) {
 // A stack argument must already sit at the caller's fixed-object
 // offset, otherwise a sibcall would have to shuffle the stack.
1312 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
1313 TII, VA))
1314 return false;
1315 }
1316 }
1317 }
1318
1319 bool PositionIndependent = isPositionIndependent();
1320 // If the tailcall address may be in a register, then make sure it's
1321 // possible to register allocate for it. The call address can
1322 // only target %A0 or %A1 since the tail call must be scheduled after
1323 // callee-saved registers are restored. These happen to be the same
1324 // registers used to pass 'inreg' arguments so watch out for those.
1325 if ((!isa<GlobalAddressSDNode>(Callee) &&
1326 !isa<ExternalSymbolSDNode>(Callee)) ||
1327 PositionIndependent) {
1328 unsigned NumInRegs = 0;
1329 // In PIC we need an extra register to formulate the address computation
1330 // for the callee.
1331 unsigned MaxInRegs = PositionIndependent ? 1 : 2;
1332
1333 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1334 CCValAssign &VA = ArgLocs[i];
1335 if (!VA.isRegLoc())
1336 continue;
1337 Register Reg = VA.getLocReg();
1338 switch (Reg) {
1339 default:
1340 break;
1341 case M68k::A0:
1342 case M68k::A1:
1343 if (++NumInRegs == MaxInRegs)
1344 return false;
1345 break;
1346 }
1347 }
1348 }
1349
1350 const MachineRegisterInfo &MRI = MF.getRegInfo();
1351 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
1352 return false;
1353 }
1354
1355 bool CalleeWillPop = M68k::isCalleePop(
1356 CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);
1357
1358 if (unsigned BytesToPop =
1360 // If we have bytes to pop, the callee must pop them.
1361 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
1362 if (!CalleePopMatches)
1363 return false;
1364 } else if (CalleeWillPop && StackArgsSize > 0) {
1365 // If we don't have bytes to pop, make sure the callee doesn't pop any.
1366 return false;
1367 }
1368
1369 return true;
1370}
1371
1372//===----------------------------------------------------------------------===//
1373// Custom Lower
1374//===----------------------------------------------------------------------===//
1375
1377 SelectionDAG &DAG) const {
 // Dispatch each opcode marked Custom in the constructor to its dedicated
 // lowering helper; anything else reaching here is a bug.
1378 switch (Op.getOpcode()) {
1379 default:
1380 llvm_unreachable("Should not custom lower this!");
1381 case ISD::SADDO:
1382 case ISD::UADDO:
1383 case ISD::SSUBO:
1384 case ISD::USUBO:
1385 case ISD::SMULO:
1386 case ISD::UMULO:
1387 return LowerXALUO(Op, DAG);
1388 case ISD::SETCC:
1389 return LowerSETCC(Op, DAG);
1390 case ISD::SETCCCARRY:
1391 return LowerSETCCCARRY(Op, DAG);
1392 case ISD::SELECT:
1393 return LowerSELECT(Op, DAG);
1394 case ISD::BRCOND:
1395 return LowerBRCOND(Op, DAG);
1396 case ISD::ADDC:
1397 case ISD::ADDE:
1398 case ISD::SUBC:
1399 case ISD::SUBE:
1400 return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
1401 case ISD::ConstantPool:
1402 return LowerConstantPool(Op, DAG);
1403 case ISD::GlobalAddress:
1404 return LowerGlobalAddress(Op, DAG);
1406 return LowerExternalSymbol(Op, DAG);
1407 case ISD::BlockAddress:
1408 return LowerBlockAddress(Op, DAG);
1409 case ISD::JumpTable:
1410 return LowerJumpTable(Op, DAG);
1411 case ISD::VASTART:
1412 return LowerVASTART(Op, DAG);
1414 return LowerDYNAMIC_STACKALLOC(Op, DAG);
1415 case ISD::SHL_PARTS:
1416 return LowerShiftLeftParts(Op, DAG);
1417 case ISD::SRA_PARTS:
1418 return LowerShiftRightParts(Op, DAG, true);
1419 case ISD::SRL_PARTS:
1420 return LowerShiftRightParts(Op, DAG, false);
1421 case ISD::ATOMIC_FENCE:
1422 return LowerATOMICFENCE(Op, DAG);
1424 return LowerGlobalTLSAddress(Op, DAG);
1425 }
1426}
1427
// Build and lower a call to an external runtime symbol (used for the TLS
// helpers below): set up CallLoweringInfo anchored on the entry-node chain
// with the given argument list, delegate to LowerCallTo, and return the
// call's result value.
1428SDValue M68kTargetLowering::LowerExternalSymbolCall(SelectionDAG &DAG,
1429 SDLoc Loc,
1430 llvm::StringRef SymbolName,
1431 ArgListTy &&ArgList) const {
1432 PointerType *PtrTy = PointerType::get(*DAG.getContext(), 0);
1433 CallLoweringInfo CLI(DAG);
1434 CLI.setDebugLoc(Loc)
1435 .setChain(DAG.getEntryNode())
1437 DAG.getExternalSymbol(SymbolName.data(),
1439 std::move(ArgList));
 // LowerCallTo returns {result, chain}; only the result value is needed.
1440 return LowerCallTo(CLI).first;
1441}
1442
// Emit a call to __tls_get_addr with a single pointer argument formed as
// GOT + target-flagged address of \p GA; used by the general-dynamic and
// local-dynamic TLS lowerings.
1443SDValue M68kTargetLowering::getTLSGetAddr(GlobalAddressSDNode *GA,
1444 SelectionDAG &DAG,
1445 unsigned TargetFlags) const {
1446 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
1448 GA->getGlobal(), GA, GA->getValueType(0), GA->getOffset(), TargetFlags);
1449 SDValue Arg = DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, GOT, TGA);
1450
1451 PointerType *PtrTy = PointerType::get(*DAG.getContext(), 0);
1452
 // __tls_get_addr takes exactly one pointer-typed argument.
1453 ArgListTy Args;
1454 ArgListEntry Entry;
1455 Entry.Node = Arg;
1456 Entry.Ty = PtrTy;
1457 Args.push_back(Entry);
1458 return LowerExternalSymbolCall(DAG, SDLoc(GA), "__tls_get_addr",
1459 std::move(Args));
1460}
1461
1462SDValue M68kTargetLowering::getM68kReadTp(SDLoc Loc, SelectionDAG &DAG) const {
1463 return LowerExternalSymbolCall(DAG, Loc, "__m68k_read_tp", ArgListTy());
1464}
1465
1466SDValue M68kTargetLowering::LowerTLSGeneralDynamic(GlobalAddressSDNode *GA,
1467 SelectionDAG &DAG) const {
1468 return getTLSGetAddr(GA, DAG, M68kII::MO_TLSGD);
1469}
1470
// Local-dynamic TLS: call __tls_get_addr once for the module's TLS block
// (via the TLSLDM reference), then add the variable's offset within that
// block to the returned base address.
1471SDValue M68kTargetLowering::LowerTLSLocalDynamic(GlobalAddressSDNode *GA,
1472 SelectionDAG &DAG) const {
1473 SDValue Addr = getTLSGetAddr(GA, DAG, M68kII::MO_TLSLDM);
1474 SDValue TGA =
1475 DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
1477 return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, Addr);
1478}
1479
// Initial-exec TLS: read the thread pointer via __m68k_read_tp, load the
// variable's offset out of the GOT, and add the two.
1480SDValue M68kTargetLowering::LowerTLSInitialExec(GlobalAddressSDNode *GA,
1481 SelectionDAG &DAG) const {
1482 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
1483 SDValue Tp = getM68kReadTp(SDLoc(GA), DAG);
1484 SDValue TGA =
1485 DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
1487 SDValue Addr = DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, GOT);
 // The GOT slot at Addr holds the variable's offset from the thread
 // pointer; loading off the entry node keeps the load unordered with the
 // rest of the function's chain.
1488 SDValue Offset =
1489 DAG.getLoad(MVT::i32, SDLoc(GA), DAG.getEntryNode(), Addr,
1491
1492 return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, Offset, Tp);
1493}
1494
// Local-exec TLS: no runtime lookup of the offset is needed; simply add the
// target-flagged address of the variable to the thread pointer returned by
// __m68k_read_tp.
1495SDValue M68kTargetLowering::LowerTLSLocalExec(GlobalAddressSDNode *GA,
1496 SelectionDAG &DAG) const {
1497 SDValue Tp = getM68kReadTp(SDLoc(GA), DAG);
1498 SDValue TGA =
1499 DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
1501 return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, Tp);
1502}
1503
// Dispatch a thread-local GlobalAddress to the lowering routine that
// matches the TLS model the target machine chose for the global. Only ELF
// targets are supported.
1504SDValue M68kTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1505 SelectionDAG &DAG) const {
1506 assert(Subtarget.isTargetELF());
1507
1508 auto *GA = cast<GlobalAddressSDNode>(Op);
1509 TLSModel::Model AccessModel = DAG.getTarget().getTLSModel(GA->getGlobal());
1510
1511 switch (AccessModel) {
1513 return LowerTLSGeneralDynamic(GA, DAG);
1515 return LowerTLSLocalDynamic(GA, DAG);
1517 return LowerTLSInitialExec(GA, DAG);
1519 return LowerTLSLocalExec(GA, DAG);
1520 }
1521
1522 llvm_unreachable("Unexpected TLS access model type");
1523}
1524
1525bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
1526 SDValue C) const {
1527 // Shifts and add instructions in M68000 and M68010 support
1528 // up to 32 bits, but mul only has 16-bit variant. So it's almost
1529 // certainly beneficial to lower 8/16/32-bit mul to their
1530 // add / shifts counterparts. But for 64-bits mul, it might be
1531 // safer to just leave it to compiler runtime implementations.
1532 return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
1533}
1534
1535static bool isOverflowArithmetic(unsigned Opcode) {
1536 switch (Opcode) {
1537 case ISD::UADDO:
1538 case ISD::SADDO:
1539 case ISD::USUBO:
1540 case ISD::SSUBO:
1541 case ISD::UMULO:
1542 case ISD::SMULO:
1543 return true;
1544 default:
1545 return false;
1546 }
1547}
1548
1550 SDValue &Result, SDValue &CCR,
1551 unsigned &CC) {
 // Map an ISD overflow op onto the matching M68k node. Outputs: Result is
 // the arithmetic value, CCR the flags-producing value (or a constant 0
 // when the op provably cannot overflow), and CC the condition code that
 // signals overflow.
1552 SDNode *N = Op.getNode();
1553 EVT VT = N->getValueType(0);
1554 SDValue LHS = N->getOperand(0);
1555 SDValue RHS = N->getOperand(1);
1556 SDLoc DL(Op);
1557
1558 unsigned TruncOp = 0;
1559 auto PromoteMULO = [&](unsigned ExtOp) {
1560 // We don't have 8-bit multiplications, so promote i8 version of U/SMULO
1561 // to i16.
1562 // Ideally this should be done by legalizer but sadly there is no promotion
1563 // rule for U/SMULO at this moment.
1564 if (VT == MVT::i8) {
1565 LHS = DAG.getNode(ExtOp, DL, MVT::i16, LHS);
1566 RHS = DAG.getNode(ExtOp, DL, MVT::i16, RHS);
1567 VT = MVT::i16;
1568 TruncOp = ISD::TRUNCATE;
1569 }
1570 };
1571
1572 bool NoOverflow = false;
1573 unsigned BaseOp = 0;
1574 switch (Op.getOpcode()) {
1575 default:
1576 llvm_unreachable("Unknown ovf instruction!");
1577 case ISD::SADDO:
1578 BaseOp = M68kISD::ADD;
1579 CC = M68k::COND_VS;
1580 break;
1581 case ISD::UADDO:
1582 BaseOp = M68kISD::ADD;
1583 CC = M68k::COND_CS;
1584 break;
1585 case ISD::SSUBO:
1586 BaseOp = M68kISD::SUB;
1587 CC = M68k::COND_VS;
1588 break;
1589 case ISD::USUBO:
1590 BaseOp = M68kISD::SUB;
1591 CC = M68k::COND_CS;
1592 break;
1593 case ISD::UMULO:
1594 PromoteMULO(ISD::ZERO_EXTEND);
 // A promoted (widened) multiply cannot overflow its wide result type,
 // so a plain ISD::MUL suffices and no flags are produced.
1595 NoOverflow = VT != MVT::i32;
1596 BaseOp = NoOverflow ? ISD::MUL : M68kISD::UMUL;
1597 CC = M68k::COND_VS;
1598 break;
1599 case ISD::SMULO:
1600 PromoteMULO(ISD::SIGN_EXTEND);
1601 NoOverflow = VT != MVT::i32;
1602 BaseOp = NoOverflow ? ISD::MUL : M68kISD::SMUL;
1603 CC = M68k::COND_VS;
1604 break;
1605 }
1606
1607 SDVTList VTs;
1608 if (NoOverflow)
1609 VTs = DAG.getVTList(VT);
1610 else
1611 // Also sets CCR.
1612 VTs = DAG.getVTList(VT, MVT::i8);
1613
1614 SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
1615 Result = Arith.getValue(0);
1616 if (TruncOp)
1617 // Right now the only place to truncate is from i16 to i8.
1618 Result = DAG.getNode(TruncOp, DL, MVT::i8, Arith);
1619
1620 if (NoOverflow)
1621 CCR = DAG.getConstant(0, DL, N->getValueType(1));
1622 else
1623 CCR = Arith.getValue(1);
1624}
1625
1626SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
1627 SDNode *N = Op.getNode();
1628 SDLoc DL(Op);
1629
1630 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
1631 // a "setcc" instruction that checks the overflow flag.
1632 SDValue Result, CCR;
1633 unsigned CC;
1634 lowerOverflowArithmetic(Op, DAG, Result, CCR, CC);
1635
1636 SDValue Overflow;
1637 if (isa<ConstantSDNode>(CCR)) {
1638 // It's likely a result of operations that will not overflow
1639 // hence no setcc is needed.
1640 Overflow = CCR;
1641 } else {
1642 // Generate a M68kISD::SETCC.
1643 Overflow = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
1644 DAG.getConstant(CC, DL, MVT::i8), CCR);
1645 }
1646
1647 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Overflow);
1648}
1649
1650/// Create a BTST (Bit Test) node - Test bit \p BitNo in \p Src and set
1651/// condition according to equal/not-equal condition code \p CC.
1653 const SDLoc &DL, SelectionDAG &DAG) {
1654 // If Src is i8, promote it to i32 with any_extend. There is no i8 BTST
1655 // instruction. Since the shift amount is in-range-or-undefined, we know
1656 // that doing a bittest on the i32 value is ok.
1657 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
1658 Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
1659
1660 // If the operand types disagree, extend the shift amount to match. Since
1661 // BTST ignores high bits (like shifts) we can use anyextend.
1662 if (Src.getValueType() != BitNo.getValueType())
1663 BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
1664
1665 SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo);
1666
1667 // NOTE BTST sets CCR.Z flag
 // The M68k condition used below is derived from CC (computation not
 // visible in this excerpt).
1669 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
1670 DAG.getConstant(Cond, DL, MVT::i8), BTST);
1671}
1672
1673/// Result of 'and' is compared against zero. Change to a BTST node if possible.
1675 SelectionDAG &DAG) {
 // Strip truncates so the bit-test patterns can be matched on the wide
 // operands; truncation safety is re-checked below via known bits.
1676 SDValue Op0 = And.getOperand(0);
1677 SDValue Op1 = And.getOperand(1);
1678 if (Op0.getOpcode() == ISD::TRUNCATE)
1679 Op0 = Op0.getOperand(0);
1680 if (Op1.getOpcode() == ISD::TRUNCATE)
1681 Op1 = Op1.getOperand(0);
1682
1683 SDValue LHS, RHS;
 // Canonicalize (x & (1 << n)) so the SHL ends up in Op0.
1684 if (Op1.getOpcode() == ISD::SHL)
1685 std::swap(Op0, Op1);
1686 if (Op0.getOpcode() == ISD::SHL) {
 // (x & (1 << n)) -> test bit n of x.
1687 if (isOneConstant(Op0.getOperand(0))) {
1688 // If we looked past a truncate, check that it's only truncating away
1689 // known zeros.
1690 unsigned BitWidth = Op0.getValueSizeInBits();
1691 unsigned AndBitWidth = And.getValueSizeInBits();
1692 if (BitWidth > AndBitWidth) {
1693 auto Known = DAG.computeKnownBits(Op0);
1694 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
1695 return SDValue();
1696 }
1697 LHS = Op1;
1698 RHS = Op0.getOperand(1);
1699 }
1700 } else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
1701 uint64_t AndRHSVal = AndRHS->getZExtValue();
1702 SDValue AndLHS = Op0;
1703
 // ((x >> n) & 1) -> test bit n of x.
1704 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
1705 LHS = AndLHS.getOperand(0);
1706 RHS = AndLHS.getOperand(1);
1707 }
1708
1709 // Use BTST if the immediate can't be encoded in a TEST instruction.
1710 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
1711 LHS = AndLHS;
1712 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
1713 }
1714 }
1715
1716 if (LHS.getNode())
1717 return getBitTestCondition(LHS, RHS, CC, DL, DAG);
1718
1719 return SDValue();
1720}
1721
 // One-to-one mapping from an integer ISD::CondCode to the corresponding
 // M68k condition code.
1723 switch (SetCCOpcode) {
1724 default:
1725 llvm_unreachable("Invalid integer condition!");
1726 case ISD::SETEQ:
1727 return M68k::COND_EQ;
1728 case ISD::SETGT:
1729 return M68k::COND_GT;
1730 case ISD::SETGE:
1731 return M68k::COND_GE;
1732 case ISD::SETLT:
1733 return M68k::COND_LT;
1734 case ISD::SETLE:
1735 return M68k::COND_LE;
1736 case ISD::SETNE:
1737 return M68k::COND_NE;
 // Unsigned predicates map onto the carry-based conditions.
1738 case ISD::SETULT:
1739 return M68k::COND_CS;
1740 case ISD::SETUGE:
1741 return M68k::COND_CC;
1742 case ISD::SETUGT:
1743 return M68k::COND_HI;
1744 case ISD::SETULE:
1745 return M68k::COND_LS;
1746 }
1747}
1748
1749/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific
1750/// condition code, returning the condition code and the LHS/RHS of the
1751/// comparison to make.
1752static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
1753 bool IsFP, SDValue &LHS, SDValue &RHS,
1754 SelectionDAG &DAG) {
1755 if (!IsFP) {
1756 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1757 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
1758 // X > -1 -> X == 0, jump !sign.
1759 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1760 return M68k::COND_PL;
1761 }
1762 if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
1763 // X < 0 -> X == 0, jump on sign.
1764 return M68k::COND_MI;
1765 }
1766 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
1767 // X < 1 -> X <= 0
1768 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1769 return M68k::COND_LE;
1770 }
1771 }
1772
1773 return TranslateIntegerM68kCC(SetCCOpcode);
1774 }
1775
1776 // First determine if it is required or is profitable to flip the operands.
1777
1778 // If LHS is a foldable load, but RHS is not, flip the condition.
1779 if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
1780 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
1781 std::swap(LHS, RHS);
1782 }
1783
1784 switch (SetCCOpcode) {
1785 default:
1786 break;
1787 case ISD::SETOLT:
1788 case ISD::SETOLE:
1789 case ISD::SETUGT:
1790 case ISD::SETUGE:
1791 std::swap(LHS, RHS);
1792 break;
1793 }
1794
1795 // On a floating point condition, the flags are set as follows:
1796 // ZF PF CF op
1797 // 0 | 0 | 0 | X > Y
1798 // 0 | 0 | 1 | X < Y
1799 // 1 | 0 | 0 | X == Y
1800 // 1 | 1 | 1 | unordered
1801 switch (SetCCOpcode) {
1802 default:
1803 llvm_unreachable("Condcode should be pre-legalized away");
1804 case ISD::SETUEQ:
1805 case ISD::SETEQ:
1806 return M68k::COND_EQ;
1807 case ISD::SETOLT: // flipped
1808 case ISD::SETOGT:
1809 case ISD::SETGT:
1810 return M68k::COND_HI;
1811 case ISD::SETOLE: // flipped
1812 case ISD::SETOGE:
1813 case ISD::SETGE:
1814 return M68k::COND_CC;
1815 case ISD::SETUGT: // flipped
1816 case ISD::SETULT:
1817 case ISD::SETLT:
1818 return M68k::COND_CS;
1819 case ISD::SETUGE: // flipped
1820 case ISD::SETULE:
1821 case ISD::SETLE:
1822 return M68k::COND_LS;
1823 case ISD::SETONE:
1824 case ISD::SETNE:
1825 return M68k::COND_NE;
1826 case ISD::SETOEQ:
1827 case ISD::SETUNE:
1828 return M68k::COND_INVALID;
1829 }
1830}
1831
1832// Convert (truncate (srl X, N) to i1) to (bt X, N)
1834 const SDLoc &DL, SelectionDAG &DAG) {
1835
1836 assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
1837 "Expected TRUNCATE to i1 node");
1838
 // Only (truncate (srl X, N)) is handled here; any other truncate source
 // is left for generic lowering.
1839 if (Op.getOperand(0).getOpcode() != ISD::SRL)
1840 return SDValue();
1841
 // The shifted-out bit is exactly bit N of X, so test it directly.
1842 SDValue ShiftRight = Op.getOperand(0);
1843 return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
1844 CC, DL, DAG);
1845}
1846
1847/// \brief return true if \c Op has a use that doesn't just read flags.
1849 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
1850 ++UI) {
1851 SDNode *User = UI->getUser();
1852 unsigned UOpNo = UI->getOperandNo();
1853 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
1854 // Look past truncate.
1855 UOpNo = User->use_begin()->getOperandNo();
1856 User = User->use_begin()->getUser();
1857 }
1858
 // Flag-only consumers are BRCOND, SETCC, and the condition operand
 // (operand 0) of SELECT; anything else is a real data use.
1859 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
1860 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
1861 return true;
1862 }
1863 return false;
1864}
1865
// Produce a CCR(flags)-defining SDValue equivalent to comparing Op against
// zero for the given M68k condition code. When the node defining Op already
// sets the flags the condition needs (and reusing them is safe), return its
// flags result instead of emitting an explicit CMP-with-0 (TEST).
1866SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
1867 const SDLoc &DL, SelectionDAG &DAG) const {
1868
1869 // CF and OF aren't always set the way we want. Determine which
1870 // of these we need.
1871 bool NeedCF = false;
1872 bool NeedOF = false;
1873 switch (M68kCC) {
1874 default:
1875 break;
1876 case M68k::COND_HI:
1877 case M68k::COND_CC:
1878 case M68k::COND_CS:
1879 case M68k::COND_LS:
1880 NeedCF = true;
1881 break;
1882 case M68k::COND_GT:
1883 case M68k::COND_GE:
1884 case M68k::COND_LT:
1885 case M68k::COND_LE:
1886 case M68k::COND_VS:
1887 case M68k::COND_VC: {
1888 // Check if we really need to set the
1889 // Overflow flag. If NoSignedWrap is present
1890 // that is not actually needed.
1891 switch (Op->getOpcode()) {
1892 case ISD::ADD:
1893 case ISD::SUB:
1894 case ISD::MUL:
1895 case ISD::SHL: {
1896 if (Op.getNode()->getFlags().hasNoSignedWrap())
1897 break;
1898 [[fallthrough]];
1899 }
1900 default:
1901 NeedOF = true;
1902 break;
1903 }
1904 break;
1905 }
1906 }
1907 // See if we can use the CCR value from the operand instead of
1908 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
1909 // we prove that the arithmetic won't overflow, we can't use OF or CF.
1910 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
1911 // Emit a CMP with 0, which is the TEST pattern.
1912 return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
1913 DAG.getConstant(0, DL, Op.getValueType()), Op);
1914 }
1915 unsigned Opcode = 0;
1916 unsigned NumOperands = 0;
1917
1918 // Truncate operations may prevent the merge of the SETCC instruction
1919 // and the arithmetic instruction before it. Attempt to truncate the operands
1920 // of the arithmetic instruction and use a reduced bit-width instruction.
1921 bool NeedTruncation = false;
1922 SDValue ArithOp = Op;
1923 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
1924 SDValue Arith = Op->getOperand(0);
1925 // Both the trunc and the arithmetic op need to have one user each.
1926 if (Arith->hasOneUse())
1927 switch (Arith.getOpcode()) {
1928 default:
1929 break;
1930 case ISD::ADD:
1931 case ISD::SUB:
1932 case ISD::AND:
1933 case ISD::OR:
1934 case ISD::XOR: {
1935 NeedTruncation = true;
1936 ArithOp = Arith;
1937 }
1938 }
1939 }
1940
1941 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
1942 // which may be the result of a CAST. We use the variable 'Op', which is the
1943 // non-casted variable when we check for possible users.
1944 switch (ArithOp.getOpcode()) {
1945 case ISD::ADD:
1946 Opcode = M68kISD::ADD;
1947 NumOperands = 2;
1948 break;
1949 case ISD::SHL:
1950 case ISD::SRL:
1951 // If we have a constant logical shift that's only used in a comparison
1952 // against zero turn it into an equivalent AND. This allows turning it into
1953 // a TEST instruction later.
1954 if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
1955 Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
1956 !hasNonFlagsUse(Op)) {
1957 EVT VT = Op.getValueType();
1958 unsigned BitWidth = VT.getSizeInBits();
1959 unsigned ShAmt = Op->getConstantOperandVal(1);
1960 if (ShAmt >= BitWidth) // Avoid undefined shifts.
1961 break;
1962 APInt Mask = ArithOp.getOpcode() == ISD::SRL
1964 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
1965 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
1966 break;
1967 Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
1968 DAG.getConstant(Mask, DL, VT));
1969 }
1970 break;
1971
1972 case ISD::AND:
1973 // If the primary 'and' result isn't used, don't bother using
1974 // M68kISD::AND, because a TEST instruction will be better.
1975 if (!hasNonFlagsUse(Op)) {
1976 SDValue Op0 = ArithOp->getOperand(0);
1977 SDValue Op1 = ArithOp->getOperand(1);
1978 EVT VT = ArithOp.getValueType();
1979 bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
1980 bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
1981
1982 // But if we can combine this into an ANDN operation, then create an AND
1983 // now and allow it to be pattern matched into an ANDN.
1984 if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
1985 break;
1986 }
1987 [[fallthrough]];
1988 case ISD::SUB:
1989 case ISD::OR:
1990 case ISD::XOR:
1991 // Due to the ISEL shortcoming noted above, be conservative if this op is
1992 // likely to be selected as part of a load-modify-store instruction.
1993 for (const auto *U : Op.getNode()->users())
1994 if (U->getOpcode() == ISD::STORE)
1995 goto default_case;
1996
1997 // Otherwise use a regular CCR-setting instruction.
1998 switch (ArithOp.getOpcode()) {
1999 default:
2000 llvm_unreachable("unexpected operator!");
2001 case ISD::SUB:
2002 Opcode = M68kISD::SUB;
2003 break;
2004 case ISD::XOR:
2005 Opcode = M68kISD::XOR;
2006 break;
2007 case ISD::AND:
2008 Opcode = M68kISD::AND;
2009 break;
2010 case ISD::OR:
2011 Opcode = M68kISD::OR;
2012 break;
2013 }
2014
2015 NumOperands = 2;
2016 break;
2017 case M68kISD::ADD:
2018 case M68kISD::SUB:
2019 case M68kISD::OR:
2020 case M68kISD::XOR:
2021 case M68kISD::AND:
 // Already a flags-producing M68k node; its result #1 is the CCR value.
2022 return SDValue(Op.getNode(), 1);
2023 default:
2024 default_case:
2025 break;
2026 }
2027
2028 // If we found that truncation is beneficial, perform the truncation and
2029 // update 'Op'.
2030 if (NeedTruncation) {
2031 EVT VT = Op.getValueType();
2032 SDValue WideVal = Op->getOperand(0);
2033 EVT WideVT = WideVal.getValueType();
2034 unsigned ConvertedOp = 0;
2035 // Use a target machine opcode to prevent further DAGCombine
2036 // optimizations that may separate the arithmetic operations
2037 // from the setcc node.
2038 switch (WideVal.getOpcode()) {
2039 default:
2040 break;
2041 case ISD::ADD:
2042 ConvertedOp = M68kISD::ADD;
2043 break;
2044 case ISD::SUB:
2045 ConvertedOp = M68kISD::SUB;
2046 break;
2047 case ISD::AND:
2048 ConvertedOp = M68kISD::AND;
2049 break;
2050 case ISD::OR:
2051 ConvertedOp = M68kISD::OR;
2052 break;
2053 case ISD::XOR:
2054 ConvertedOp = M68kISD::XOR;
2055 break;
2056 }
2057
2058 if (ConvertedOp) {
2059 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2060 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
2061 SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
2062 SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
2063 Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
2064 }
2065 }
2066 }
2067
2068 if (Opcode == 0) {
2069 // Emit a CMP with 0, which is the TEST pattern.
2070 return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
2071 DAG.getConstant(0, DL, Op.getValueType()), Op);
2072 }
 // Re-emit the arithmetic as its flags-producing M68k twin and replace all
 // uses so the flags result can be consumed directly.
2073 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
2074 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
2075
2076 SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
2077 DAG.ReplaceAllUsesWith(Op, New);
2078 return SDValue(New.getNode(), 1);
2079}
2080
2081/// \brief Return true if the condition is an unsigned comparison operation.
2082static bool isM68kCCUnsigned(unsigned M68kCC) {
2083 switch (M68kCC) {
2084 default:
2085 llvm_unreachable("Invalid integer condition!");
2086 case M68k::COND_EQ:
2087 case M68k::COND_NE:
2088 case M68k::COND_CS:
2089 case M68k::COND_HI:
2090 case M68k::COND_LS:
2091 case M68k::COND_CC:
2092 return true;
2093 case M68k::COND_GT:
2094 case M68k::COND_GE:
2095 case M68k::COND_LT:
2096 case M68k::COND_LE:
2097 return false;
2098 }
2099}
2100
2101SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
2102 const SDLoc &DL, SelectionDAG &DAG) const {
2103 if (isNullConstant(Op1))
2104 return EmitTest(Op0, M68kCC, DL, DAG);
2105
2106 assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
2107 "Unexpected comparison operation for MVT::i1 operands");
2108
2109 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
2110 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
2111 // Only promote the compare up to I32 if it is a 16 bit operation
2112 // with an immediate. 16 bit immediates are to be avoided.
2113 if ((Op0.getValueType() == MVT::i16 &&
2114 (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
2116 unsigned ExtendOp =
2118 Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
2119 Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
2120 }
2121 // Use SUB instead of CMP to enable CSE between SUB and CMP.
2122 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
2123 SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
2124 return SDValue(Sub.getNode(), 1);
2125 }
2126 return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
2127}
2128
2129/// Result of 'and' or 'trunc to i1' is compared against zero.
2130/// Change to a BTST node if possible.
2131SDValue M68kTargetLowering::LowerToBTST(SDValue Op, ISD::CondCode CC,
2132 const SDLoc &DL,
2133 SelectionDAG &DAG) const {
2134 if (Op.getOpcode() == ISD::AND)
2135 return LowerAndToBTST(Op, CC, DL, DAG);
2136 if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
2137 return LowerTruncateToBTST(Op, CC, DL, DAG);
2138 return SDValue();
2139}
2140
2141SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2142 MVT VT = Op.getSimpleValueType();
2143 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
2144
2145 SDValue Op0 = Op.getOperand(0);
2146 SDValue Op1 = Op.getOperand(1);
2147 SDLoc DL(Op);
2148 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2149
2150 // Optimize to BTST if possible.
2151 // Lower (X & (1 << N)) == 0 to BTST(X, N).
2152 // Lower ((X >>u N) & 1) != 0 to BTST(X, N).
2153 // Lower ((X >>s N) & 1) != 0 to BTST(X, N).
2154 // Lower (trunc (X >> N) to i1) to BTST(X, N).
2155 if (Op0.hasOneUse() && isNullConstant(Op1) &&
2156 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2157 if (SDValue NewSetCC = LowerToBTST(Op0, CC, DL, DAG)) {
2158 if (VT == MVT::i1)
2159 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
2160 return NewSetCC;
2161 }
2162 }
2163
2164 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
2165 // these.
2166 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
2167 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2168
2169 // If the input is a setcc, then reuse the input setcc or use a new one with
2170 // the inverted condition.
2171 if (Op0.getOpcode() == M68kISD::SETCC) {
2173 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
2174 if (!Invert)
2175 return Op0;
2176
2177 CCode = M68k::GetOppositeBranchCondition(CCode);
2178 SDValue SetCC =
2179 DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2180 DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
2181 if (VT == MVT::i1)
2182 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
2183 return SetCC;
2184 }
2185 }
2186 if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2187 if (isOneConstant(Op1)) {
2189 return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
2190 }
2191 if (!isNullConstant(Op1)) {
2192 SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
2193 return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
2194 }
2195 }
2196
2197 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
2198 unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
2199 if (M68kCC == M68k::COND_INVALID)
2200 return SDValue();
2201
2202 SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
2203 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2204 DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
2205}
2206
2207SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
2208 SelectionDAG &DAG) const {
2209 SDValue LHS = Op.getOperand(0);
2210 SDValue RHS = Op.getOperand(1);
2211 SDValue Carry = Op.getOperand(2);
2212 SDValue Cond = Op.getOperand(3);
2213 SDLoc DL(Op);
2214
2215 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
2216 M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());
2217
2218 EVT CarryVT = Carry.getValueType();
2219 APInt NegOne = APInt::getAllOnes(CarryVT.getScalarSizeInBits());
2220 Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
2221 DAG.getConstant(NegOne, DL, CarryVT));
2222
2223 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
2224 SDValue Cmp =
2225 DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));
2226
2227 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2228 DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
2229}
2230
2231/// Return true if opcode is a M68k logical comparison.
2233 unsigned Opc = Op.getNode()->getOpcode();
2234 if (Opc == M68kISD::CMP)
2235 return true;
2236 if (Op.getResNo() == 1 &&
2237 (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX ||
2238 Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL ||
2239 Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND))
2240 return true;
2241
2242 if (Op.getResNo() == 2 && Opc == M68kISD::UMUL)
2243 return true;
2244
2245 return false;
2246}
2247
2249 if (V.getOpcode() != ISD::TRUNCATE)
2250 return false;
2251
2252 SDValue VOp0 = V.getOperand(0);
2253 unsigned InBits = VOp0.getValueSizeInBits();
2254 unsigned Bits = V.getValueSizeInBits();
2255 return DAG.MaskedValueIsZero(VOp0,
2256 APInt::getHighBitsSet(InBits, InBits - Bits));
2257}
2258
2259SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2260 bool addTest = true;
2261 SDValue Cond = Op.getOperand(0);
2262 SDValue Op1 = Op.getOperand(1);
2263 SDValue Op2 = Op.getOperand(2);
2264 SDLoc DL(Op);
2265 SDValue CC;
2266
2267 if (Cond.getOpcode() == ISD::SETCC) {
2268 if (SDValue NewCond = LowerSETCC(Cond, DAG))
2269 Cond = NewCond;
2270 }
2271
2272 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
2273 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
2274 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
2275 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
2276 if (Cond.getOpcode() == M68kISD::SETCC &&
2277 Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
2278 isNullConstant(Cond.getOperand(1).getOperand(0))) {
2279 SDValue Cmp = Cond.getOperand(1);
2280
2281 unsigned CondCode = Cond.getConstantOperandVal(0);
2282
2283 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
2284 (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
2285 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
2286
2287 SDValue CmpOp0 = Cmp.getOperand(1);
2288 // Apply further optimizations for special cases
2289 // (select (x != 0), -1, 0) -> neg & sbb
2290 // (select (x == 0), 0, -1) -> neg & sbb
2291 if (isNullConstant(Y) &&
2292 (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {
2293
2294 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
2295
2296 SDValue Neg =
2297 DAG.getNode(M68kISD::SUB, DL, VTs,
2298 DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);
2299
2300 SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
2301 DAG.getConstant(M68k::COND_CS, DL, MVT::i8),
2302 SDValue(Neg.getNode(), 1));
2303 return Res;
2304 }
2305
2306 Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8,
2307 DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);
2308
2309 SDValue Res = // Res = 0 or -1.
2310 DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
2311 DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);
2312
2313 if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
2314 Res = DAG.getNOT(DL, Res, Res.getValueType());
2315
2316 if (!isNullConstant(Op2))
2317 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
2318 return Res;
2319 }
2320 }
2321
2322 // Look past (and (setcc_carry (cmp ...)), 1).
2323 if (Cond.getOpcode() == ISD::AND &&
2324 Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
2325 isOneConstant(Cond.getOperand(1)))
2326 Cond = Cond.getOperand(0);
2327
2328 // If condition flag is set by a M68kISD::CMP, then use it as the condition
2329 // setting operand in place of the M68kISD::SETCC.
2330 unsigned CondOpcode = Cond.getOpcode();
2331 if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
2332 CC = Cond.getOperand(0);
2333
2334 SDValue Cmp = Cond.getOperand(1);
2335 unsigned Opc = Cmp.getOpcode();
2336
2337 bool IllegalFPCMov = false;
2338
2339 if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BTST) {
2340 Cond = Cmp;
2341 addTest = false;
2342 }
2343 } else if (isOverflowArithmetic(CondOpcode)) {
2344 // Result is unused here.
2346 unsigned CCode;
2347 lowerOverflowArithmetic(Cond, DAG, Result, Cond, CCode);
2348 CC = DAG.getConstant(CCode, DL, MVT::i8);
2349 addTest = false;
2350 }
2351
2352 if (addTest) {
2353 // Look past the truncate if the high bits are known zero.
2355 Cond = Cond.getOperand(0);
2356
2357 // We know the result of AND is compared against zero. Try to match
2358 // it to BT.
2359 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
2360 if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
2361 CC = NewSetCC.getOperand(0);
2362 Cond = NewSetCC.getOperand(1);
2363 addTest = false;
2364 }
2365 }
2366 }
2367
2368 if (addTest) {
2369 CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8);
2370 Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
2371 }
2372
2373 // a < b ? -1 : 0 -> RES = ~setcc_carry
2374 // a < b ? 0 : -1 -> RES = setcc_carry
2375 // a >= b ? -1 : 0 -> RES = setcc_carry
2376 // a >= b ? 0 : -1 -> RES = ~setcc_carry
2377 if (Cond.getOpcode() == M68kISD::SUB) {
2378 unsigned CondCode = CC->getAsZExtVal();
2379
2380 if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
2381 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
2382 (isNullConstant(Op1) || isNullConstant(Op2))) {
2383 SDValue Res =
2384 DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
2385 DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond);
2386 if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
2387 return DAG.getNOT(DL, Res, Res.getValueType());
2388 return Res;
2389 }
2390 }
2391
2392 // M68k doesn't have an i8 cmov. If both operands are the result of a
2393 // truncate widen the cmov and push the truncate through. This avoids
2394 // introducing a new branch during isel and doesn't add any extensions.
2395 if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
2396 Op2.getOpcode() == ISD::TRUNCATE) {
2397 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
2398 if (T1.getValueType() == T2.getValueType() &&
2399 // Block CopyFromReg so partial register stalls are avoided.
2400 T1.getOpcode() != ISD::CopyFromReg &&
2401 T2.getOpcode() != ISD::CopyFromReg) {
2402 SDValue Cmov =
2403 DAG.getNode(M68kISD::CMOV, DL, T1.getValueType(), T2, T1, CC, Cond);
2404 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
2405 }
2406 }
2407
2408 // Simple optimization when Cond is a constant to avoid generating
2409 // M68kISD::CMOV if possible.
2410 // TODO: Generalize this to use SelectionDAG::computeKnownBits.
2411 if (auto *Const = dyn_cast<ConstantSDNode>(Cond.getNode())) {
2412 const APInt &C = Const->getAPIntValue();
2413 if (C.countr_zero() >= 5)
2414 return Op2;
2415 else if (C.countr_one() >= 5)
2416 return Op1;
2417 }
2418
2419 // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
2420 // condition is true.
2421 SDValue Ops[] = {Op2, Op1, CC, Cond};
2422 return DAG.getNode(M68kISD::CMOV, DL, Op.getValueType(), Ops);
2423}
2424
2425/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
2426/// each of which has no other use apart from the AND / OR.
2427static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
2428 Opc = Op.getOpcode();
2429 if (Opc != ISD::OR && Opc != ISD::AND)
2430 return false;
2431 return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) &&
2432 Op.getOperand(0).hasOneUse() &&
2433 M68k::IsSETCC(Op.getOperand(1).getOpcode()) &&
2434 Op.getOperand(1).hasOneUse());
2435}
2436
2437/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
2438/// SETCC node has a single use.
2440 if (Op.getOpcode() != ISD::XOR)
2441 return false;
2442 if (isOneConstant(Op.getOperand(1)))
2443 return Op.getOperand(0).getOpcode() == M68kISD::SETCC &&
2444 Op.getOperand(0).hasOneUse();
2445 return false;
2446}
2447
2448SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2449 bool AddTest = true;
2450 SDValue Chain = Op.getOperand(0);
2451 SDValue Cond = Op.getOperand(1);
2452 SDValue Dest = Op.getOperand(2);
2453 SDLoc DL(Op);
2454 SDValue CC;
2455 bool Inverted = false;
2456
2457 if (Cond.getOpcode() == ISD::SETCC) {
2458 // Check for setcc([su]{add,sub}o == 0).
2459 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
2460 isNullConstant(Cond.getOperand(1)) &&
2461 Cond.getOperand(0).getResNo() == 1 &&
2462 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
2463 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
2464 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
2465 Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
2466 Inverted = true;
2467 Cond = Cond.getOperand(0);
2468 } else {
2469 if (SDValue NewCond = LowerSETCC(Cond, DAG))
2470 Cond = NewCond;
2471 }
2472 }
2473
2474 // Look pass (and (setcc_carry (cmp ...)), 1).
2475 if (Cond.getOpcode() == ISD::AND &&
2476 Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
2477 isOneConstant(Cond.getOperand(1)))
2478 Cond = Cond.getOperand(0);
2479
2480 // If condition flag is set by a M68kISD::CMP, then use it as the condition
2481 // setting operand in place of the M68kISD::SETCC.
2482 unsigned CondOpcode = Cond.getOpcode();
2483 if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
2484 CC = Cond.getOperand(0);
2485
2486 SDValue Cmp = Cond.getOperand(1);
2487 unsigned Opc = Cmp.getOpcode();
2488
2489 if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BTST) {
2490 Cond = Cmp;
2491 AddTest = false;
2492 } else {
2493 switch (CC->getAsZExtVal()) {
2494 default:
2495 break;
2496 case M68k::COND_VS:
2497 case M68k::COND_CS:
2498 // These can only come from an arithmetic instruction with overflow,
2499 // e.g. SADDO, UADDO.
2500 Cond = Cond.getNode()->getOperand(1);
2501 AddTest = false;
2502 break;
2503 }
2504 }
2505 }
2506 CondOpcode = Cond.getOpcode();
2507 if (isOverflowArithmetic(CondOpcode)) {
2509 unsigned CCode;
2510 lowerOverflowArithmetic(Cond, DAG, Result, Cond, CCode);
2511
2512 if (Inverted)
2514 CC = DAG.getConstant(CCode, DL, MVT::i8);
2515
2516 AddTest = false;
2517 } else {
2518 unsigned CondOpc;
2519 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
2520 SDValue Cmp = Cond.getOperand(0).getOperand(1);
2521 if (CondOpc == ISD::OR) {
2522 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
2523 // two branches instead of an explicit OR instruction with a
2524 // separate test.
2525 if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
2526 CC = Cond.getOperand(0).getOperand(0);
2527 Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
2528 Dest, CC, Cmp);
2529 CC = Cond.getOperand(1).getOperand(0);
2530 Cond = Cmp;
2531 AddTest = false;
2532 }
2533 } else { // ISD::AND
2534 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
2535 // two branches instead of an explicit AND instruction with a
2536 // separate test. However, we only do this if this block doesn't
2537 // have a fall-through edge, because this requires an explicit
2538 // jmp when the condition is false.
2539 if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
2540 Op.getNode()->hasOneUse()) {
2541 M68k::CondCode CCode =
2542 (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
2543 CCode = M68k::GetOppositeBranchCondition(CCode);
2544 CC = DAG.getConstant(CCode, DL, MVT::i8);
2545 SDNode *User = *Op.getNode()->user_begin();
2546 // Look for an unconditional branch following this conditional branch.
2547 // We need this because we need to reverse the successors in order
2548 // to implement FCMP_OEQ.
2549 if (User->getOpcode() == ISD::BR) {
2550 SDValue FalseBB = User->getOperand(1);
2551 SDNode *NewBR =
2552 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
2553 assert(NewBR == User);
2554 (void)NewBR;
2555 Dest = FalseBB;
2556
2557 Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
2558 Dest, CC, Cmp);
2559 M68k::CondCode CCode =
2560 (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
2561 CCode = M68k::GetOppositeBranchCondition(CCode);
2562 CC = DAG.getConstant(CCode, DL, MVT::i8);
2563 Cond = Cmp;
2564 AddTest = false;
2565 }
2566 }
2567 }
2568 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
2569 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
2570 // It should be transformed during dag combiner except when the condition
2571 // is set by a arithmetics with overflow node.
2572 M68k::CondCode CCode =
2573 (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
2574 CCode = M68k::GetOppositeBranchCondition(CCode);
2575 CC = DAG.getConstant(CCode, DL, MVT::i8);
2576 Cond = Cond.getOperand(0).getOperand(1);
2577 AddTest = false;
2578 }
2579 }
2580
2581 if (AddTest) {
2582 // Look pass the truncate if the high bits are known zero.
2584 Cond = Cond.getOperand(0);
2585
2586 // We know the result is compared against zero. Try to match it to BT.
2587 if (Cond.hasOneUse()) {
2588 if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
2589 CC = NewSetCC.getOperand(0);
2590 Cond = NewSetCC.getOperand(1);
2591 AddTest = false;
2592 }
2593 }
2594 }
2595
2596 if (AddTest) {
2597 M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
2598 CC = DAG.getConstant(MxCond, DL, MVT::i8);
2599 Cond = EmitTest(Cond, MxCond, DL, DAG);
2600 }
2601 return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
2602 Cond);
2603}
2604
2605SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
2606 SelectionDAG &DAG) const {
2607 MVT VT = Op.getNode()->getSimpleValueType(0);
2608
2609 // Let legalize expand this if it isn't a legal type yet.
2610 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2611 return SDValue();
2612
2613 SDVTList VTs = DAG.getVTList(VT, MVT::i8);
2614
2615 unsigned Opc;
2616 bool ExtraOp = false;
2617 switch (Op.getOpcode()) {
2618 default:
2619 llvm_unreachable("Invalid code");
2620 case ISD::ADDC:
2621 Opc = M68kISD::ADD;
2622 break;
2623 case ISD::ADDE:
2624 Opc = M68kISD::ADDX;
2625 ExtraOp = true;
2626 break;
2627 case ISD::SUBC:
2628 Opc = M68kISD::SUB;
2629 break;
2630 case ISD::SUBE:
2631 Opc = M68kISD::SUBX;
2632 ExtraOp = true;
2633 break;
2634 }
2635
2636 if (!ExtraOp)
2637 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2638 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2639 Op.getOperand(2));
2640}
2641
2642// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
2643// their target countpart wrapped in the M68kISD::Wrapper node. Suppose N is
2644// one of the above mentioned nodes. It has to be wrapped because otherwise
2645// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2646// be used to form addressing mode. These wrapped nodes will be selected
2647// into MOV32ri.
2648SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
2649 SelectionDAG &DAG) const {
2650 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2651
2652 // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
2653 // global base reg.
2654 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
2655
2656 unsigned WrapperKind = M68kISD::Wrapper;
2657 if (M68kII::isPCRelGlobalReference(OpFlag)) {
2658 WrapperKind = M68kISD::WrapperPC;
2659 }
2660
2661 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2663 CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
2664
2665 SDLoc DL(CP);
2666 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
2667
2668 // With PIC, the address is actually $g + Offset.
2670 Result = DAG.getNode(ISD::ADD, DL, PtrVT,
2671 DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
2672 Result);
2673 }
2674
2675 return Result;
2676}
2677
2678SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
2679 SelectionDAG &DAG) const {
2680 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
2681
2682 // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
2683 // global base reg.
2685 unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);
2686
2687 unsigned WrapperKind = M68kISD::Wrapper;
2688 if (M68kII::isPCRelGlobalReference(OpFlag)) {
2689 WrapperKind = M68kISD::WrapperPC;
2690 }
2691
2692 auto PtrVT = getPointerTy(DAG.getDataLayout());
2693 SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
2694
2695 SDLoc DL(Op);
2696 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
2697
2698 // With PIC, the address is actually $g + Offset.
2700 Result = DAG.getNode(ISD::ADD, DL, PtrVT,
2701 DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
2702 Result);
2703 }
2704
2705 // For symbols that require a load from a stub to get the address, emit the
2706 // load.
2707 if (M68kII::isGlobalStubReference(OpFlag)) {
2708 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2710 }
2711
2712 return Result;
2713}
2714
2715SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
2716 SelectionDAG &DAG) const {
2717 unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
2718 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2719 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
2720 SDLoc DL(Op);
2721 auto PtrVT = getPointerTy(DAG.getDataLayout());
2722
2723 // Create the TargetBlockAddressAddress node.
2724 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
2725
2726 if (M68kII::isPCRelBlockReference(OpFlags)) {
2727 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2728 } else {
2729 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2730 }
2731
2732 // With PIC, the address is actually $g + Offset.
2733 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2734 Result =
2735 DAG.getNode(ISD::ADD, DL, PtrVT,
2736 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2737 }
2738
2739 return Result;
2740}
2741
2742SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
2743 const SDLoc &DL, int64_t Offset,
2744 SelectionDAG &DAG) const {
2745 unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
2746 auto PtrVT = getPointerTy(DAG.getDataLayout());
2747
2748 // Create the TargetGlobalAddress node, folding in the constant
2749 // offset if it is legal.
2751 if (M68kII::isDirectGlobalReference(OpFlags)) {
2752 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
2753 Offset = 0;
2754 } else {
2755 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2756 }
2757
2758 if (M68kII::isPCRelGlobalReference(OpFlags))
2759 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2760 else
2761 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2762
2763 // With PIC, the address is actually $g + Offset.
2764 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2765 Result =
2766 DAG.getNode(ISD::ADD, DL, PtrVT,
2767 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2768 }
2769
2770 // For globals that require a load from a stub to get the address, emit the
2771 // load.
2772 if (M68kII::isGlobalStubReference(OpFlags)) {
2773 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2775 }
2776
2777 // If there was a non-zero offset that we didn't fold, create an explicit
2778 // addition for it.
2779 if (Offset != 0) {
2780 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
2781 DAG.getConstant(Offset, DL, PtrVT));
2782 }
2783
2784 return Result;
2785}
2786
2787SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
2788 SelectionDAG &DAG) const {
2789 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2790 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
2791 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
2792}
2793
2794//===----------------------------------------------------------------------===//
2795// Custom Lower Jump Table
2796//===----------------------------------------------------------------------===//
2797
2798SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
2799 SelectionDAG &DAG) const {
2800 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2801
2802 // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
2803 // global base reg.
2804 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
2805
2806 unsigned WrapperKind = M68kISD::Wrapper;
2807 if (M68kII::isPCRelGlobalReference(OpFlag)) {
2808 WrapperKind = M68kISD::WrapperPC;
2809 }
2810
2811 auto PtrVT = getPointerTy(DAG.getDataLayout());
2812 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
2813 SDLoc DL(JT);
2814 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
2815
2816 // With PIC, the address is actually $g + Offset.
2818 Result = DAG.getNode(ISD::ADD, DL, PtrVT,
2819 DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
2820 Result);
2821 }
2822
2823 return Result;
2824}
2825
2827 return Subtarget.getJumpTableEncoding();
2828}
2829
2831 const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
2832 unsigned uid, MCContext &Ctx) const {
2834 Ctx);
2835}
2836
                                                     SelectionDAG &DAG) const {

  // NOTE(review): the signature head and the statements that normally
  // precede this return appear to be missing from this excerpt — confirm
  // against upstream before relying on this path.
  // MachineJumpTableInfo::EK_LabelDifference32 entry
  return Table;
}
2846
2847// NOTE This only used for MachineJumpTableInfo::EK_LabelDifference32 entries
2849 const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
2850 return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
2851}
2852
2855 if (Constraint.size() > 0) {
2856 switch (Constraint[0]) {
2857 case 'a':
2858 case 'd':
2859 return C_RegisterClass;
2860 case 'I':
2861 case 'J':
2862 case 'K':
2863 case 'L':
2864 case 'M':
2865 case 'N':
2866 case 'O':
2867 case 'P':
2868 return C_Immediate;
2869 case 'C':
2870 if (Constraint.size() == 2)
2871 switch (Constraint[1]) {
2872 case '0':
2873 case 'i':
2874 case 'j':
2875 return C_Immediate;
2876 default:
2877 break;
2878 }
2879 break;
2880 case 'Q':
2881 case 'U':
2882 return C_Memory;
2883 default:
2884 break;
2885 }
2886 }
2887
2888 return TargetLowering::getConstraintType(Constraint);
2889}
2890
2892 StringRef Constraint,
2893 std::vector<SDValue> &Ops,
2894 SelectionDAG &DAG) const {
2895 SDValue Result;
2896
2897 if (Constraint.size() == 1) {
2898 // Constant constraints
2899 switch (Constraint[0]) {
2900 case 'I':
2901 case 'J':
2902 case 'K':
2903 case 'L':
2904 case 'M':
2905 case 'N':
2906 case 'O':
2907 case 'P': {
2908 auto *C = dyn_cast<ConstantSDNode>(Op);
2909 if (!C)
2910 return;
2911
2912 int64_t Val = C->getSExtValue();
2913 switch (Constraint[0]) {
2914 case 'I': // constant integer in the range [1,8]
2915 if (Val > 0 && Val <= 8)
2916 break;
2917 return;
2918 case 'J': // constant signed 16-bit integer
2919 if (isInt<16>(Val))
2920 break;
2921 return;
2922 case 'K': // constant that is NOT in the range of [-0x80, 0x80)
2923 if (Val < -0x80 || Val >= 0x80)
2924 break;
2925 return;
2926 case 'L': // constant integer in the range [-8,-1]
2927 if (Val < 0 && Val >= -8)
2928 break;
2929 return;
2930 case 'M': // constant that is NOT in the range of [-0x100, 0x100]
2931 if (Val < -0x100 || Val >= 0x100)
2932 break;
2933 return;
2934 case 'N': // constant integer in the range [24,31]
2935 if (Val >= 24 && Val <= 31)
2936 break;
2937 return;
2938 case 'O': // constant integer 16
2939 if (Val == 16)
2940 break;
2941 return;
2942 case 'P': // constant integer in the range [8,15]
2943 if (Val >= 8 && Val <= 15)
2944 break;
2945 return;
2946 default:
2947 llvm_unreachable("Unhandled constant constraint");
2948 }
2949
2950 Result = DAG.getSignedTargetConstant(Val, SDLoc(Op), Op.getValueType());
2951 break;
2952 }
2953 default:
2954 break;
2955 }
2956 }
2957
2958 if (Constraint.size() == 2) {
2959 switch (Constraint[0]) {
2960 case 'C':
2961 // Constant constraints start with 'C'
2962 switch (Constraint[1]) {
2963 case '0':
2964 case 'i':
2965 case 'j': {
2966 auto *C = dyn_cast<ConstantSDNode>(Op);
2967 if (!C)
2968 break;
2969
2970 int64_t Val = C->getSExtValue();
2971 switch (Constraint[1]) {
2972 case '0': // constant integer 0
2973 if (!Val)
2974 break;
2975 return;
2976 case 'i': // constant integer
2977 break;
2978 case 'j': // integer constant that doesn't fit in 16 bits
2979 if (!isInt<16>(C->getSExtValue()))
2980 break;
2981 return;
2982 default:
2983 llvm_unreachable("Unhandled constant constraint");
2984 }
2985
2986 Result = DAG.getSignedTargetConstant(Val, SDLoc(Op), Op.getValueType());
2987 break;
2988 }
2989 default:
2990 break;
2991 }
2992 break;
2993 default:
2994 break;
2995 }
2996 }
2997
2998 if (Result.getNode()) {
2999 Ops.push_back(Result);
3000 return;
3001 }
3002
3003 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3004}
3005
3006std::pair<unsigned, const TargetRegisterClass *>
3008 StringRef Constraint,
3009 MVT VT) const {
3010 if (Constraint.size() == 1) {
3011 switch (Constraint[0]) {
3012 case 'r':
3013 case 'd':
3014 switch (VT.SimpleTy) {
3015 case MVT::i8:
3016 return std::make_pair(0U, &M68k::DR8RegClass);
3017 case MVT::i16:
3018 return std::make_pair(0U, &M68k::DR16RegClass);
3019 case MVT::i32:
3020 return std::make_pair(0U, &M68k::DR32RegClass);
3021 default:
3022 break;
3023 }
3024 break;
3025 case 'a':
3026 switch (VT.SimpleTy) {
3027 case MVT::i16:
3028 return std::make_pair(0U, &M68k::AR16RegClass);
3029 case MVT::i32:
3030 return std::make_pair(0U, &M68k::AR32RegClass);
3031 default:
3032 break;
3033 }
3034 break;
3035 default:
3036 break;
3037 }
3038 }
3039
3040 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3041}
3042
3043/// Determines whether the callee is required to pop its own arguments.
3044/// Callee pop is necessary to support tail calls.
3045bool M68k::isCalleePop(CallingConv::ID CC, bool IsVarArg, bool GuaranteeTCO) {
3046 return CC == CallingConv::M68k_RTD && !IsVarArg;
3047}
3048
3049// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
3050// together with other CMOV pseudo-opcodes into a single basic-block with
3051// conditional jump around it.
3053 switch (MI.getOpcode()) {
3054 case M68k::CMOV8d:
3055 case M68k::CMOV16d:
3056 case M68k::CMOV32r:
3057 return true;
3058
3059 default:
3060 return false;
3061 }
3062}
3063
3064// The CCR operand of SelectItr might be missing a kill marker
3065// because there were multiple uses of CCR, and ISel didn't know
3066// which to mark. Figure out whether SelectItr should have had a
3067// kill marker, and set it if it should. Returns the correct kill
3068// marker value.
3071 const TargetRegisterInfo *TRI) {
3072 // Scan forward through BB for a use/def of CCR.
3073 MachineBasicBlock::iterator miI(std::next(SelectItr));
3074 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
3075 const MachineInstr &mi = *miI;
3076 if (mi.readsRegister(M68k::CCR, /*TRI=*/nullptr))
3077 return false;
3078 if (mi.definesRegister(M68k::CCR, /*TRI=*/nullptr))
3079 break; // Should have kill-flag - update below.
3080 }
3081
3082 // If we hit the end of the block, check whether CCR is live into a
3083 // successor.
3084 if (miI == BB->end())
3085 for (const auto *SBB : BB->successors())
3086 if (SBB->isLiveIn(M68k::CCR))
3087 return false;
3088
3089 // We found a def, or hit the end of the basic block and CCR wasn't live
3090 // out. SelectMI should have a kill flag on CCR.
3091 SelectItr->addRegisterKilled(M68k::CCR, TRI);
3092 return true;
3093}
3094
3096M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
3097 MachineBasicBlock *MBB) const {
3098 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
3099 DebugLoc DL = MI.getDebugLoc();
3100
3101 // To "insert" a SELECT_CC instruction, we actually have to insert the
3102 // diamond control-flow pattern. The incoming instruction knows the
3103 // destination vreg to set, the condition code register to branch on, the
3104 // true/false values to select between, and a branch opcode to use.
3105 const BasicBlock *BB = MBB->getBasicBlock();
3107
3108 // ThisMBB:
3109 // ...
3110 // TrueVal = ...
3111 // cmp ccX, r1, r2
3112 // bcc Copy1MBB
3113 // fallthrough --> Copy0MBB
3114 MachineBasicBlock *ThisMBB = MBB;
3116
3117 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
3118 // as described above, by inserting a MBB, and then making a PHI at the join
3119 // point to select the true and false operands of the CMOV in the PHI.
3120 //
3121 // The code also handles two different cases of multiple CMOV opcodes
3122 // in a row.
3123 //
3124 // Case 1:
3125 // In this case, there are multiple CMOVs in a row, all which are based on
3126 // the same condition setting (or the exact opposite condition setting).
3127 // In this case we can lower all the CMOVs using a single inserted MBB, and
3128 // then make a number of PHIs at the join point to model the CMOVs. The only
3129 // trickiness here, is that in a case like:
3130 //
3131 // t2 = CMOV cond1 t1, f1
3132 // t3 = CMOV cond1 t2, f2
3133 //
3134 // when rewriting this into PHIs, we have to perform some renaming on the
3135 // temps since you cannot have a PHI operand refer to a PHI result earlier
3136 // in the same block. The "simple" but wrong lowering would be:
3137 //
3138 // t2 = PHI t1(BB1), f1(BB2)
3139 // t3 = PHI t2(BB1), f2(BB2)
3140 //
3141 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
3142 // renaming is to note that on the path through BB1, t2 is really just a
3143 // copy of t1, and do that renaming, properly generating:
3144 //
3145 // t2 = PHI t1(BB1), f1(BB2)
3146 // t3 = PHI t1(BB1), f2(BB2)
3147 //
3148 // Case 2, we lower cascaded CMOVs such as
3149 //
3150 // (CMOV (CMOV F, T, cc1), T, cc2)
3151 //
3152 // to two successives branches.
3153 MachineInstr *CascadedCMOV = nullptr;
3154 MachineInstr *LastCMOV = &MI;
3155 M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
3158 std::next(MachineBasicBlock::iterator(MI));
3159
3160 // Check for case 1, where there are multiple CMOVs with the same condition
3161 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
3162 // number of jumps the most.
3163
3164 if (isCMOVPseudo(MI)) {
3165 // See if we have a string of CMOVS with the same condition.
3166 while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
3167 (NextMIIt->getOperand(3).getImm() == CC ||
3168 NextMIIt->getOperand(3).getImm() == OppCC)) {
3169 LastCMOV = &*NextMIIt;
3170 ++NextMIIt;
3171 }
3172 }
3173
3174 // This checks for case 2, but only do this if we didn't already find
3175 // case 1, as indicated by LastCMOV == MI.
3176 if (LastCMOV == &MI && NextMIIt != MBB->end() &&
3177 NextMIIt->getOpcode() == MI.getOpcode() &&
3178 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
3179 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
3180 NextMIIt->getOperand(1).isKill()) {
3181 CascadedCMOV = &*NextMIIt;
3182 }
3183
3184 MachineBasicBlock *Jcc1MBB = nullptr;
3185
3186 // If we have a cascaded CMOV, we lower it to two successive branches to
3187 // the same block. CCR is used by both, so mark it as live in the second.
3188 if (CascadedCMOV) {
3189 Jcc1MBB = F->CreateMachineBasicBlock(BB);
3190 F->insert(It, Jcc1MBB);
3191 Jcc1MBB->addLiveIn(M68k::CCR);
3192 }
3193
3194 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
3195 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
3196 F->insert(It, Copy0MBB);
3197 F->insert(It, SinkMBB);
3198
3199 // Set the call frame size on entry to the new basic blocks.
3200 unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
3201 Copy0MBB->setCallFrameSize(CallFrameSize);
3202 SinkMBB->setCallFrameSize(CallFrameSize);
3203
3204 // If the CCR register isn't dead in the terminator, then claim that it's
3205 // live into the sink and copy blocks.
3206 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3207
3208 MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
3209 if (!LastCCRSUser->killsRegister(M68k::CCR, /*TRI=*/nullptr) &&
3210 !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
3211 Copy0MBB->addLiveIn(M68k::CCR);
3212 SinkMBB->addLiveIn(M68k::CCR);
3213 }
3214
3215 // Transfer the remainder of MBB and its successor edges to SinkMBB.
3216 SinkMBB->splice(SinkMBB->begin(), MBB,
3217 std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
3219
3220 // Add the true and fallthrough blocks as its successors.
3221 if (CascadedCMOV) {
3222 // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
3223 MBB->addSuccessor(Jcc1MBB);
3224
3225 // In that case, Jcc1MBB will itself fallthrough the Copy0MBB, and
3226 // jump to the SinkMBB.
3227 Jcc1MBB->addSuccessor(Copy0MBB);
3228 Jcc1MBB->addSuccessor(SinkMBB);
3229 } else {
3230 MBB->addSuccessor(Copy0MBB);
3231 }
3232
3233 // The true block target of the first (or only) branch is always SinkMBB.
3234 MBB->addSuccessor(SinkMBB);
3235
3236 // Create the conditional branch instruction.
3237 unsigned Opc = M68k::GetCondBranchFromCond(CC);
3238 BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);
3239
3240 if (CascadedCMOV) {
3241 unsigned Opc2 = M68k::GetCondBranchFromCond(
3242 (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
3243 BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
3244 }
3245
3246 // Copy0MBB:
3247 // %FalseValue = ...
3248 // # fallthrough to SinkMBB
3249 Copy0MBB->addSuccessor(SinkMBB);
3250
3251 // SinkMBB:
3252 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
3253 // ...
3256 std::next(MachineBasicBlock::iterator(LastCMOV));
3257 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
3260
3261 // As we are creating the PHIs, we have to be careful if there is more than
3262 // one. Later CMOVs may reference the results of earlier CMOVs, but later
3263 // PHIs have to reference the individual true/false inputs from earlier PHIs.
3264 // That also means that PHI construction must work forward from earlier to
3265 // later, and that the code must maintain a mapping from earlier PHI's
3266 // destination registers, and the registers that went into the PHI.
3267
3268 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
3269 Register DestReg = MIIt->getOperand(0).getReg();
3270 Register Op1Reg = MIIt->getOperand(1).getReg();
3271 Register Op2Reg = MIIt->getOperand(2).getReg();
3272
3273 // If this CMOV we are generating is the opposite condition from
3274 // the jump we generated, then we have to swap the operands for the
3275 // PHI that is going to be generated.
3276 if (MIIt->getOperand(3).getImm() == OppCC)
3277 std::swap(Op1Reg, Op2Reg);
3278
3279 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
3280 Op1Reg = RegRewriteTable[Op1Reg].first;
3281
3282 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
3283 Op2Reg = RegRewriteTable[Op2Reg].second;
3284
3285 MIB =
3286 BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
3287 .addReg(Op1Reg)
3288 .addMBB(Copy0MBB)
3289 .addReg(Op2Reg)
3290 .addMBB(ThisMBB);
3291
3292 // Add this PHI to the rewrite table.
3293 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
3294 }
3295
3296 // If we have a cascaded CMOV, the second Jcc provides the same incoming
3297 // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
3298 if (CascadedCMOV) {
3299 MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
3300 // Copy the PHI result to the register defined by the second CMOV.
3301 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
3302 DL, TII->get(TargetOpcode::COPY),
3303 CascadedCMOV->getOperand(0).getReg())
3304 .addReg(MI.getOperand(0).getReg());
3305 CascadedCMOV->eraseFromParent();
3306 }
3307
3308 // Now remove the CMOV(s).
3309 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
3310 (MIIt++)->eraseFromParent();
3311
3312 return SinkMBB;
3313}
3314
3316M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
3317 MachineBasicBlock *BB) const {
3318 llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
3319}
3320
3323 MachineBasicBlock *BB) const {
3324 switch (MI.getOpcode()) {
3325 default:
3326 llvm_unreachable("Unexpected instr type to insert");
3327 case M68k::CMOV8d:
3328 case M68k::CMOV16d:
3329 case M68k::CMOV32r:
3330 return EmitLoweredSelect(MI, BB);
3331 case M68k::SALLOCA:
3332 return EmitLoweredSegAlloca(MI, BB);
3333 }
3334}
3335
3336SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3338 auto PtrVT = getPointerTy(MF.getDataLayout());
3340
3341 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3342 SDLoc DL(Op);
3343
3344 // vastart just stores the address of the VarArgsFrameIndex slot into the
3345 // memory location argument.
3346 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3347 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
3348 MachinePointerInfo(SV));
3349}
3350
3351SDValue M68kTargetLowering::LowerATOMICFENCE(SDValue Op,
3352 SelectionDAG &DAG) const {
3353 // Lower to a memory barrier created from inline asm.
3354 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3355 LLVMContext &Ctx = *DAG.getContext();
3356
3357 const unsigned Flags = InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore |
3359 const SDValue AsmOperands[4] = {
3360 Op.getOperand(0), // Input chain
3362 "", TLI.getProgramPointerTy(
3363 DAG.getDataLayout())), // Empty inline asm string
3364 DAG.getMDNode(MDNode::get(Ctx, {})), // (empty) srcloc
3365 DAG.getTargetConstant(Flags, SDLoc(Op),
3366 TLI.getPointerTy(DAG.getDataLayout())), // Flags
3367 };
3368
3369 return DAG.getNode(ISD::INLINEASM, SDLoc(Op),
3370 DAG.getVTList(MVT::Other, MVT::Glue), AsmOperands);
3371}
3372
3373// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
3374// Calls to _alloca are needed to probe the stack when allocating more than 4k
3375// bytes in one go. Touching the stack at 4K increments is necessary to ensure
3376// that the guard pages used by the OS virtual memory manager are allocated in
3377// correct sequence.
3378SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3379 SelectionDAG &DAG) const {
3381 bool SplitStack = MF.shouldSplitStack();
3382
3383 SDLoc DL(Op);
3384
3385 // Get the inputs.
3386 SDNode *Node = Op.getNode();
3387 SDValue Chain = Op.getOperand(0);
3388 SDValue Size = Op.getOperand(1);
3389 unsigned Align = Op.getConstantOperandVal(2);
3390 EVT VT = Node->getValueType(0);
3391
3392 // Chain the dynamic stack allocation so that it doesn't modify the stack
3393 // pointer when other instructions are using the stack.
3394 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
3395
3397 if (SplitStack) {
3398 auto &MRI = MF.getRegInfo();
3399 auto SPTy = getPointerTy(DAG.getDataLayout());
3400 auto *ARClass = getRegClassFor(SPTy);
3401 Register Vreg = MRI.createVirtualRegister(ARClass);
3402 Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
3403 Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
3404 DAG.getRegister(Vreg, SPTy));
3405 } else {
3406 auto &TLI = DAG.getTargetLoweringInfo();
3408 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
3409 " not tell us which reg is the stack pointer!");
3410
3411 SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
3412 Chain = SP.getValue(1);
3413 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3414 unsigned StackAlign = TFI.getStackAlignment();
3415 Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
3416 if (Align > StackAlign)
3417 Result = DAG.getNode(ISD::AND, DL, VT, Result,
3418 DAG.getSignedConstant(-(uint64_t)Align, DL, VT));
3419 Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
3420 }
3421
3422 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);
3423
3424 SDValue Ops[2] = {Result, Chain};
3425 return DAG.getMergeValues(Ops, DL);
3426}
3427
3428SDValue M68kTargetLowering::LowerShiftLeftParts(SDValue Op,
3429 SelectionDAG &DAG) const {
3430 SDLoc DL(Op);
3431 SDValue Lo = Op.getOperand(0);
3432 SDValue Hi = Op.getOperand(1);
3433 SDValue Shamt = Op.getOperand(2);
3434 EVT VT = Lo.getValueType();
3435
3436 // if Shamt - register size < 0: // Shamt < register size
3437 // Lo = Lo << Shamt
3438 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (register size - 1 ^ Shamt))
3439 // else:
3440 // Lo = 0
3441 // Hi = Lo << (Shamt - register size)
3442
3443 SDValue Zero = DAG.getConstant(0, DL, VT);
3444 SDValue One = DAG.getConstant(1, DL, VT);
3445 SDValue MinusRegisterSize = DAG.getSignedConstant(-32, DL, VT);
3446 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3447 SDValue ShamtMinusRegisterSize =
3448 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3449 SDValue RegisterSizeMinus1Shamt =
3450 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3451
3452 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3453 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3454 SDValue ShiftRightLo =
3455 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, RegisterSizeMinus1Shamt);
3456 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3457 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3458 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize);
3459
3460 SDValue CC =
3461 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3462
3463 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3464 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3465
3466 return DAG.getMergeValues({Lo, Hi}, DL);
3467}
3468
3469SDValue M68kTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3470 bool IsSRA) const {
3471 SDLoc DL(Op);
3472 SDValue Lo = Op.getOperand(0);
3473 SDValue Hi = Op.getOperand(1);
3474 SDValue Shamt = Op.getOperand(2);
3475 EVT VT = Lo.getValueType();
3476
3477 // SRA expansion:
3478 // if Shamt - register size < 0: // Shamt < register size
3479 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3480 // Hi = Hi >>s Shamt
3481 // else:
3482 // Lo = Hi >>s (Shamt - register size);
3483 // Hi = Hi >>s (register size - 1)
3484 //
3485 // SRL expansion:
3486 // if Shamt - register size < 0: // Shamt < register size
3487 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3488 // Hi = Hi >>u Shamt
3489 // else:
3490 // Lo = Hi >>u (Shamt - register size);
3491 // Hi = 0;
3492
3493 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3494
3495 SDValue Zero = DAG.getConstant(0, DL, VT);
3496 SDValue One = DAG.getConstant(1, DL, VT);
3497 SDValue MinusRegisterSize = DAG.getSignedConstant(-32, DL, VT);
3498 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3499 SDValue ShamtMinusRegisterSize =
3500 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3501 SDValue RegisterSizeMinus1Shamt =
3502 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3503
3504 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3505 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3506 SDValue ShiftLeftHi =
3507 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, RegisterSizeMinus1Shamt);
3508 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3509 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3510 SDValue LoFalse =
3511 DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize);
3512 SDValue HiFalse =
3513 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, RegisterSizeMinus1) : Zero;
3514
3515 SDValue CC =
3516 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3517
3518 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3519 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3520
3521 return DAG.getMergeValues({Lo, Hi}, DL);
3522}
3523
3524//===----------------------------------------------------------------------===//
3525// DAG Combine
3526//===----------------------------------------------------------------------===//
3527
3529 SelectionDAG &DAG) {
3530 return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
3531 DAG.getConstant(Cond, dl, MVT::i8), CCR);
3532}
3533// When legalizing carry, we create carries via add X, -1
3534// If that comes from an actual carry, via setcc, we use the
3535// carry directly.
3537 if (CCR.getOpcode() == M68kISD::ADD) {
3538 if (isAllOnesConstant(CCR.getOperand(1))) {
3539 SDValue Carry = CCR.getOperand(0);
3540 while (Carry.getOpcode() == ISD::TRUNCATE ||
3541 Carry.getOpcode() == ISD::ZERO_EXTEND ||
3542 Carry.getOpcode() == ISD::SIGN_EXTEND ||
3543 Carry.getOpcode() == ISD::ANY_EXTEND ||
3544 (Carry.getOpcode() == ISD::AND &&
3545 isOneConstant(Carry.getOperand(1))))
3546 Carry = Carry.getOperand(0);
3547 if (Carry.getOpcode() == M68kISD::SETCC ||
3548 Carry.getOpcode() == M68kISD::SETCC_CARRY) {
3549 if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
3550 return Carry.getOperand(1);
3551 }
3552 }
3553 }
3554
3555 return SDValue();
3556}
3557
3558/// Optimize a CCR definition used according to the condition code \p CC into
3559/// a simpler CCR value, potentially returning a new \p CC and replacing uses
3560/// of chain values.
3562 SelectionDAG &DAG,
3563 const M68kSubtarget &Subtarget) {
3564 if (CC == M68k::COND_CS)
3565 if (SDValue Flags = combineCarryThroughADD(CCR))
3566 return Flags;
3567
3568 return SDValue();
3569}
3570
3571// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
3573 const M68kSubtarget &Subtarget) {
3574 SDLoc DL(N);
3575 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
3576 SDValue CCR = N->getOperand(1);
3577
3578 // Try to simplify the CCR and condition code operands.
3579 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
3580 return getSETCC(CC, Flags, DL, DAG);
3581
3582 return SDValue();
3583}
3585 const M68kSubtarget &Subtarget) {
3586 SDLoc DL(N);
3587 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
3588 SDValue CCR = N->getOperand(3);
3589
3590 // Try to simplify the CCR and condition code operands.
3591 // Make sure to not keep references to operands, as combineSetCCCCR can
3592 // RAUW them under us.
3593 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
3594 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
3595 return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
3596 N->getOperand(1), Cond, Flags);
3597 }
3598
3599 return SDValue();
3600}
3601
3603 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3604 MVT VT = N->getSimpleValueType(0);
3605 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3606 return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
3607 N->getOperand(1), Flags);
3608 }
3609
3610 return SDValue();
3611}
3612
3613// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
3616 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3617 MVT VT = N->getSimpleValueType(0);
3618 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3619 return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
3620 N->getOperand(1), Flags);
3621 }
3622
3623 return SDValue();
3624}
3625
3626SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
3627 DAGCombinerInfo &DCI) const {
3628 SelectionDAG &DAG = DCI.DAG;
3629 switch (N->getOpcode()) {
3630 case M68kISD::SUBX:
3631 return combineSUBX(N, DAG);
3632 case M68kISD::ADDX:
3633 return combineADDX(N, DAG, DCI);
3634 case M68kISD::SETCC:
3635 return combineM68kSetCC(N, DAG, Subtarget);
3636 case M68kISD::BRCOND:
3637 return combineM68kBrCond(N, DAG, Subtarget);
3638 }
3639
3640 return SDValue();
3641}
3642
3643//===----------------------------------------------------------------------===//
3644// M68kISD Node Names
3645//===----------------------------------------------------------------------===//
3646const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
3647 switch (Opcode) {
3648 case M68kISD::CALL:
3649 return "M68kISD::CALL";
3650 case M68kISD::TAIL_CALL:
3651 return "M68kISD::TAIL_CALL";
3652 case M68kISD::RET:
3653 return "M68kISD::RET";
3654 case M68kISD::TC_RETURN:
3655 return "M68kISD::TC_RETURN";
3656 case M68kISD::ADD:
3657 return "M68kISD::ADD";
3658 case M68kISD::SUB:
3659 return "M68kISD::SUB";
3660 case M68kISD::ADDX:
3661 return "M68kISD::ADDX";
3662 case M68kISD::SUBX:
3663 return "M68kISD::SUBX";
3664 case M68kISD::SMUL:
3665 return "M68kISD::SMUL";
3666 case M68kISD::UMUL:
3667 return "M68kISD::UMUL";
3668 case M68kISD::OR:
3669 return "M68kISD::OR";
3670 case M68kISD::XOR:
3671 return "M68kISD::XOR";
3672 case M68kISD::AND:
3673 return "M68kISD::AND";
3674 case M68kISD::CMP:
3675 return "M68kISD::CMP";
3676 case M68kISD::BTST:
3677 return "M68kISD::BTST";
3678 case M68kISD::SELECT:
3679 return "M68kISD::SELECT";
3680 case M68kISD::CMOV:
3681 return "M68kISD::CMOV";
3682 case M68kISD::BRCOND:
3683 return "M68kISD::BRCOND";
3684 case M68kISD::SETCC:
3685 return "M68kISD::SETCC";
3687 return "M68kISD::SETCC_CARRY";
3689 return "M68kISD::GLOBAL_BASE_REG";
3690 case M68kISD::Wrapper:
3691 return "M68kISD::Wrapper";
3692 case M68kISD::WrapperPC:
3693 return "M68kISD::WrapperPC";
3695 return "M68kISD::SEG_ALLOCA";
3696 default:
3697 return NULL;
3698 }
3699}
3700
3702 bool IsVarArg) const {
3703 if (Return)
3704 return RetCC_M68k_C;
3705 else
3706 return CC_M68k_C;
3707}
unsigned const MachineRegisterInfo * MRI
static SDValue getSETCC(AArch64CC::CondCode CC, SDValue NZCV, const SDLoc &DL, SelectionDAG &DAG)
Helper function to create 'CSET', which is equivalent to 'CSINC <Wd>, WZR, WZR, invert(<cond>)'.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
return RetTy
uint64_t Addr
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
const HexagonInstrInfo * TII
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
This file contains the custom routines for the M68k Calling Convention that aren't done by tablegen.
static SDValue LowerTruncateToBTST(SDValue Op, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
static void lowerOverflowArithmetic(SDValue Op, SelectionDAG &DAG, SDValue &Result, SDValue &CCR, unsigned &CC)
static SDValue combineADDX(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc)
Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes each of which has no other use...
static bool hasNonFlagsUse(SDValue Op)
return true if Op has a use that doesn't just read flags.
static bool isM68kCCUnsigned(unsigned M68kCC)
Return true if the condition is an unsigned comparison operation.
static StructReturnType callIsStructReturn(const SmallVectorImpl< ISD::OutputArg > &Outs)
static bool isXor1OfSetCC(SDValue Op)
Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the SETCC node has a single use...
static SDValue LowerAndToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Result of 'and' is compared against zero. Change to a BTST node if possible.
static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode)
static StructReturnType argsAreStructReturn(const SmallVectorImpl< ISD::InputArg > &Ins)
Determines whether a function uses struct return semantics.
static bool isCMOVPseudo(MachineInstr &MI)
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)
Return true if the function is being made into a tailcall target by changing its ABI.
static bool isM68kLogicalCmp(SDValue Op)
Return true if opcode is a M68k logical comparison.
static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
Optimize a CCR definition used according to the condition code CC into a simpler CCR value,...
static SDValue combineCarryThroughADD(SDValue CCR)
static bool isOverflowArithmetic(unsigned Opcode)
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const M68kInstrInfo *TII, const CCValAssign &VA)
Return true if the given stack call argument is already available in the same position (relatively) o...
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Create a BTST (Bit Test) node - Test bit BitNo in Src and set condition according to equal/not-equal ...
StructReturnType
@ NotStructReturn
@ RegStructReturn
@ StackStructReturn
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG)
static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG)
static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL, bool IsFP, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG)
Do a one-to-one translation of a ISD::CondCode to the M68k-specific condition code,...
This file defines the interfaces that M68k uses to lower LLVM code into a selection DAG.
This file declares the M68k specific subclass of MachineFunctionInfo.
This file declares the M68k specific subclass of TargetSubtargetInfo.
This file declares the M68k specific subclass of TargetMachine.
This file contains declarations for M68k ELF object file lowering.
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
#define T1
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static constexpr Register SPReg
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
#define OP(OPC)
Definition: Instruction.h:45
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:296
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
The address of a basic block.
Definition: Constants.h:893
CCState - This class holds information needed while lowering arguments and return values.
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
unsigned getValNo() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
iterator end()
Definition: DenseMap.h:84
iterator_range< arg_iterator > args()
Definition: Function.h:892
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:766
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:704
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:688
const GlobalValue * getGlobal() const
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setBytesToPopOnReturn(unsigned bytes)
void setArgumentStackSize(unsigned size)
unsigned char classifyExternalReference(const Module &M) const
Classify a external variable reference for the current subtarget according to how we should reference...
unsigned char classifyBlockAddressReference() const
Classify a blockaddress reference for the current subtarget according to how we should reference it i...
unsigned getSlotSize() const
getSlotSize - Stack slot size in bytes.
const M68kInstrInfo * getInstrInfo() const override
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
Classify a global variable reference for the current subtarget according to how we should reference i...
unsigned getJumpTableEncoding() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
const M68kRegisterInfo * getRegisterInfo() const override
bool atLeastM68020() const
Definition: M68kSubtarget.h:89
unsigned char classifyGlobalFunctionReference(const GlobalValue *GV, const Module &M) const
Classify a global function reference for the current subtarget.
bool isTargetELF() const
const M68kFrameLowering * getFrameLowering() const override
ConstraintType getConstraintType(StringRef ConstraintStr) const override
Given a constraint, return the type of constraint it is for this target.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
CCAssignFn * getCCAssignFn(CallingConv::ID CC, bool Return, bool IsVarArg) const
M68kTargetLowering(const M68kTargetMachine &TM, const M68kSubtarget &STI)
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Context object for machine code objects.
Definition: MCContext.h:83
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:398
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1543
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setObjectZExt(int ObjectIdx, bool IsZExt)
void setObjectSExt(int ObjectIdx, bool IsSExt)
void setHasTailCall(bool V=true)
bool isObjectZExt(int ObjectIdx) const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isObjectSExt(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:69
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:585
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
int64_t getImm() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Class to represent pointers.
Definition: DerivedTypes.h:670
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:748
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:799
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:501
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:758
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:825
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:495
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:710
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:496
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:698
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:794
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:490
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
Definition: SelectionDAG.h:508
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:765
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:578
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
TargetOptions Options
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:228
LLVM Value Representation.
Definition: Value.h:74
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
use_iterator use_begin()
Definition: Value.h:360
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
@ M68k_INTR
Used for M68k interrupt routines.
Definition: CallingConv.h:235
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ M68k_RTD
Used for M68k rtd-based CC (similar to X86's stdcall).
Definition: CallingConv.h:252
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:243
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1197
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1193
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:257
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1340
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:744
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1226
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1342
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1343
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1304
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1338
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:954
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1339
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ FrameIndex
Definition: ISDOpcodes.h:80
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ BR
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:1118
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
Definition: ISDOpcodes.h:788
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1148
@ SSUBO
Same for subtraction.
Definition: ISDOpcodes.h:334
@ ATOMIC_LOAD_MIN
Definition: ISDOpcodes.h:1341
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:757
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1222
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:215
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:330
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:674
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1336
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:772
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1319
@ ATOMIC_LOAD_UMAX
Definition: ISDOpcodes.h:1344
@ SMULO
Same for multiplication.
Definition: ISDOpcodes.h:338
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1112
@ ConstantPool
Definition: ISDOpcodes.h:82
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1334
@ ATOMIC_LOAD_SUB
Definition: ISDOpcodes.h:1335
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1333
@ ExternalSymbol
Definition: ISDOpcodes.h:83
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1165
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1217
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1141
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:794
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ AssertZext
Definition: ISDOpcodes.h:62
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1613
static bool isPCRelBlockReference(unsigned char Flag)
Return True if the Block is referenced using PC.
Definition: M68kBaseInfo.h:243
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
Return true if the specified global value reference is relative to a 32-bit PIC base (M68kISD::GLOBAL...
Definition: M68kBaseInfo.h:221
static bool isGlobalStubReference(unsigned char TargetFlag)
Return true if the specified TargetFlag operand is a reference to a stub for a global,...
Definition: M68kBaseInfo.h:195
static bool isPCRelGlobalReference(unsigned char Flag)
Return True if the specified GlobalValue requires PC addressing mode.
Definition: M68kBaseInfo.h:232
@ MO_TLSLDM
On a symbol operand, this indicates that the immediate is the offset to the slot in GOT which stores ...
Definition: M68kBaseInfo.h:177
@ MO_TLSLE
On a symbol operand, this indicates that the immediate is the offset to the variable within in the th...
Definition: M68kBaseInfo.h:189
@ MO_TLSGD
On a symbol operand, this indicates that the immediate is the offset to the slot in GOT which stores ...
Definition: M68kBaseInfo.h:165
@ MO_GOTPCREL
On a symbol operand this indicates that the immediate is offset to the GOT entry for the symbol name ...
Definition: M68kBaseInfo.h:153
@ MO_TLSIE
On a symbol operand, this indicates that the immediate is the offset to the variable within the threa...
Definition: M68kBaseInfo.h:183
@ MO_TLSLD
On a symbol operand, this indicates that the immediate is the offset to variable within the thread lo...
Definition: M68kBaseInfo.h:171
static bool isDirectGlobalReference(unsigned char Flag)
Return True if the specified GlobalValue is a direct reference for a symbol.
Definition: M68kBaseInfo.h:207
@ SETCC
M68k SetCC.
@ BRCOND
M68k conditional branches.
@ CMOV
M68k conditional moves.
@ BTST
M68k bit-test instructions.
@ CMP
M68k compare and logical compare instructions.
@ SELECT
M68k Select.
@ WrapperPC
Special wrapper used under M68k PIC mode for PC relative displacements.
@ Wrapper
A wrapper node for TargetConstantPool, TargetExternalSymbol, and TargetGlobalAddress.
static bool IsSETCC(unsigned SETCC)
static unsigned GetCondBranchFromCond(M68k::CondCode CC)
Definition: M68kInstrInfo.h:97
bool isCalleePop(CallingConv::ID CallingConv, bool IsVarArg, bool GuaranteeTCO)
Determines whether the callee is required to pop its own arguments.
static M68k::CondCode GetOppositeBranchCondition(M68k::CondCode CC)
Definition: M68kInstrInfo.h:58
@ GeneralDynamic
Definition: CodeGen.h:46
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition: MathExtras.h:359
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:296
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
@ Mod
The access may modify the value stored in memory.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:380
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:323
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
Definition: ValueTypes.h:303
Describes a register that needs to be forwarded from the prologue to a musttail call.
Custom state to propagate llvm type info to register CC assigner.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)