LLVM 17.0.0git
M68kISelLowering.cpp
Go to the documentation of this file.
1//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the interfaces that M68k uses to lower LLVM code into a
11/// selection DAG.
12///
13//===----------------------------------------------------------------------===//
14
15#include "M68kISelLowering.h"
16#include "M68kCallingConv.h"
17#include "M68kMachineFunction.h"
18#include "M68kSubtarget.h"
19#include "M68kTargetMachine.h"
21
22#include "llvm/ADT/Statistic.h"
31#include "llvm/IR/CallingConv.h"
35#include "llvm/Support/Debug.h"
39
40using namespace llvm;
41
42#define DEBUG_TYPE "M68k-isel"
43
44STATISTIC(NumTailCalls, "Number of tail calls");
45
47 const M68kSubtarget &STI)
48 : TargetLowering(TM), Subtarget(STI), TM(TM) {
49
50 MVT PtrVT = MVT::i32;
51
53
54 auto *RegInfo = Subtarget.getRegisterInfo();
55 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
56
57 // Set up the register classes.
58 addRegisterClass(MVT::i8, &M68k::DR8RegClass);
59 addRegisterClass(MVT::i16, &M68k::XR16RegClass);
60 addRegisterClass(MVT::i32, &M68k::XR32RegClass);
61
62 for (auto VT : MVT::integer_valuetypes()) {
66 }
67
68 // We don't accept any truncstore of integer registers.
69 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
70 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
71 setTruncStoreAction(MVT::i64, MVT::i8, Expand);
72 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
73 setTruncStoreAction(MVT::i32, MVT::i8, Expand);
74 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
75
78 if (Subtarget.atLeastM68020())
80 else
83
84 for (auto OP :
87 setOperationAction(OP, MVT::i8, Promote);
88 setOperationAction(OP, MVT::i16, Legal);
89 setOperationAction(OP, MVT::i32, LibCall);
90 }
91
92 for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
93 setOperationAction(OP, MVT::i8, Expand);
94 setOperationAction(OP, MVT::i16, Expand);
95 }
96
97 // FIXME It would be better to use a custom lowering
98 for (auto OP : {ISD::SMULO, ISD::UMULO}) {
99 setOperationAction(OP, MVT::i8, Expand);
100 setOperationAction(OP, MVT::i16, Expand);
101 setOperationAction(OP, MVT::i32, Expand);
102 }
103
105 setOperationAction(OP, MVT::i32, Custom);
106
107 // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
108 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
113 }
114
115 // SADDO and friends are legal with this setup, i hope
116 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
121 }
122
125
126 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
132 }
133
134 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
139 }
140
147
152
155
157
159
160 // We lower the `atomic-compare-and-swap` to `__sync_val_compare_and_swap`
161 // for subtarget < M68020
163 setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i8, MVT::i16, MVT::i32},
164 Subtarget.atLeastM68020() ? Legal : LibCall);
165
167
168 // M68k does not have native read-modify-write support, so expand all of them
169 // to `__sync_fetch_*` for target < M68020, otherwise expand to CmpxChg.
170 // See `shouldExpandAtomicRMWInIR` below.
172 {
184 },
185 {MVT::i8, MVT::i16, MVT::i32}, LibCall);
186
188}
189
192 return Subtarget.atLeastM68020()
195}
196
199 return M68k::D0;
200}
201
204 return M68k::D1;
205}
206
207unsigned
209 return StringSwitch<unsigned>(ConstraintCode)
211 .Case("U", InlineAsm::Constraint_Um) // We borrow Constraint_Um for 'U'.
213}
214
216 LLVMContext &Context, EVT VT) const {
// NOTE(review): the first line of this signature (rendered line 215) is
// missing from this extraction -- this is the getSetCCResultType override.
217 // M68k SETcc produces either 0x00 or 0xFF, so the natural boolean result
// type is always i8, independent of the compared type.
218 return MVT::i8;
219}
220
222 EVT Ty) const {
// NOTE(review): the signature's first line (rendered line 221) is missing
// from this extraction; the body takes a DataLayout `DL` and a type `Ty`.
// Prefer the type's own simple MVT when it has one.
223 if (Ty.isSimple()) {
224 return Ty.getSimpleVT();
225 }
// Otherwise fall back to an integer as wide as a pointer in address space 0.
226 return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
227}
228
229#include "M68kGenCallingConv.inc"
230
232
/// Determine whether a call's outgoing arguments use struct-return
/// semantics, and if so whether the hidden sret pointer travels in a
/// register (RegStructReturn) or on the stack (StackStructReturn).
233 static StructReturnType
// NOTE(review): the parameter-list line (rendered line 234) is missing from
// this extraction; the body reads `Outs`, the outgoing-argument list.
235 if (Outs.empty())
236 return NotStructReturn;
237
// Only the first outgoing argument can carry the sret flag.
238 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
239 if (!Flags.isSRet())
240 return NotStructReturn;
// InReg on the sret argument selects the register flavor.
241 if (Flags.isInReg())
242 return RegStructReturn;
243 return StackStructReturn;
244}
245
246/// Determines whether a function uses struct return semantics.
247 static StructReturnType
// NOTE(review): the parameter-list line (rendered line 248) is missing from
// this extraction; the body reads `Ins`, the incoming-argument list.
249 if (Ins.empty())
250 return NotStructReturn;
251
// Only the first incoming argument can carry the sret flag.
252 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
253 if (!Flags.isSRet())
254 return NotStructReturn;
// InReg on the sret argument selects the register flavor.
255 if (Flags.isInReg())
256 return RegStructReturn;
257 return StackStructReturn;
258}
259
260/// Make a copy of an aggregate at address specified by "Src" to address
261/// "Dst" with size and alignment information specified by the specific
262/// parameter attribute. The copy will be passed as a byval function parameter.
// NOTE(review): the signature's first line (rendered line 263) is missing
// from this extraction; per the doc comment above, this copies a byval
// aggregate from `Src` to `Dst`.
264 SDValue Chain, ISD::ArgFlagsTy Flags,
265 SelectionDAG &DAG, const SDLoc &DL) {
// The byval size comes from the argument flags, materialized as an i32.
266 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
267
// Emit a non-volatile, always-inline memcpy; the copy itself must not be
// lowered as a tail call.
268 return DAG.getMemcpy(
269 Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
270 /*isVolatile=*/false, /*AlwaysInline=*/true,
271 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
272}
273
274/// Return true if the calling convention is one that we can guarantee TCO for.
275static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }
276
277/// Return true if we might ever do TCO for calls with this calling convention.
// NOTE(review): the function's signature line (rendered line 278) is missing
// from this extraction; the body switches on a CallingConv::ID `CC`.
279 switch (CC) {
280 // C calling conventions:
281 case CallingConv::C:
282 return true;
// Anything else is tail-callable only if TCO can be guaranteed for it.
283 default:
284 return canGuaranteeTCO(CC);
285 }
286}
287
288/// Return true if the function is being made into a tailcall target by
289/// changing its ABI.
290static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
291 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
292}
293
294/// Return true if the given stack call argument is already available in the
295/// same position (relatively) of the caller's incoming argument stack.
296 static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
// NOTE(review): two parameter lines (rendered 297-298) are missing from this
// extraction; the body also uses `Flags`, `MFI` and `MRI` parameters.
299 const M68kInstrInfo *TII,
300 const CCValAssign &VA) {
// Width of the argument in bytes, used to compare against the object size.
301 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
302
303 for (;;) {
304 // Look through nodes that don't alter the bits of the incoming value.
305 unsigned Op = Arg.getOpcode();
306 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
307 Arg = Arg.getOperand(0);
308 continue;
309 }
// A truncate of an AssertZext down to the asserted type is also a no-op on
// the bits we care about, so look through that pair as well.
310 if (Op == ISD::TRUNCATE) {
311 const SDValue &TruncInput = Arg.getOperand(0);
312 if (TruncInput.getOpcode() == ISD::AssertZext &&
313 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
314 Arg.getValueType()) {
315 Arg = TruncInput.getOperand(0);
316 continue;
317 }
318 }
319 break;
320 }
321
// INT_MAX is a sentinel meaning "no frame index identified yet".
322 int FI = INT_MAX;
323 if (Arg.getOpcode() == ISD::CopyFromReg) {
324 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
// NOTE(review): a line is missing here (rendered line 325) -- the condition
// guarding this early return (presumably a virtual-register check on VR) is
// not visible in this extraction; confirm against upstream.
326 return false;
327 MachineInstr *Def = MRI->getVRegDef(VR);
328 if (!Def)
329 return false;
330 if (!Flags.isByVal()) {
// Non-byval: the defining instruction must be a reload of a stack slot.
331 if (!TII->isLoadFromStackSlot(*Def, FI))
332 return false;
333 } else {
// Byval: accept an LEA of a frame index, and compare the byval size.
334 unsigned Opcode = Def->getOpcode();
335 if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
336 Def->getOperand(1).isFI()) {
337 FI = Def->getOperand(1).getIndex();
338 Bytes = Flags.getByValSize();
339 } else
340 return false;
341 }
342 } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
343 if (Flags.isByVal())
344 // ByVal argument is passed in as a pointer but it's now being
345 // dereferenced. e.g.
346 // define @foo(%struct.X* %A) {
347 // tail call @bar(%struct.X* byval %A)
348 // }
349 return false;
// A plain load qualifies only if it loads directly from a frame index.
350 SDValue Ptr = Ld->getBasePtr();
351 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
352 if (!FINode)
353 return false;
354 FI = FINode->getIndex();
355 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
356 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
357 FI = FINode->getIndex();
358 Bytes = Flags.getByValSize();
359 } else
360 return false;
361
// At this point every accepted path has set FI.
362 assert(FI != INT_MAX);
// Only fixed (incoming-argument area) objects can match the caller's slot.
363 if (!MFI.isFixedObjectIndex(FI))
364 return false;
365
366 if (Offset != MFI.getObjectOffset(FI))
367 return false;
368
369 if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
370 // If the argument location is wider than the argument type, check that any
371 // extension flags match.
372 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
373 Flags.isSExt() != MFI.isObjectSExt(FI)) {
374 return false;
375 }
376 }
377
// Finally, the sizes must agree exactly.
378 return Bytes == MFI.getObjectSize(FI);
379}
380
// Return (creating on first use) the fixed frame index that addresses the
// function's incoming return-address slot.
382M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
// NOTE(review): two lines are missing here (rendered 383-384) -- the
// definitions of `MF` and `FuncInfo` used below; confirm against upstream.
385 int ReturnAddrIndex = FuncInfo->getRAIndex();
386
// Index 0 means "not created yet"; lazily create and cache it.
387 if (ReturnAddrIndex == 0) {
388 // Set up a frame object for the return address.
389 unsigned SlotSize = Subtarget.getSlotSize();
// The slot sits just below the incoming SP, hence the negative offset.
390 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
391 SlotSize, -(int64_t)SlotSize, false);
392 FuncInfo->setRAIndex(ReturnAddrIndex);
393 }
394
395 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
396}
397
398SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
399 SDValue &OutRetAddr,
400 SDValue Chain,
401 bool IsTailCall, int FPDiff,
402 const SDLoc &DL) const {
403 EVT VT = getPointerTy(DAG.getDataLayout());
404 OutRetAddr = getReturnAddressFrameIndex(DAG);
405
406 // Load the "old" Return address.
407 OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
408 return SDValue(OutRetAddr.getNode(), 1);
409}
410
// Store the previously loaded return address `RetFI` into the stack slot it
// must occupy after the tail call's frame adjustment. No-op when the frames
// are the same size (FPDiff == 0).
411SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
412 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
413 EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
414 if (!FPDiff)
415 return Chain;
416
417 // Calculate the new stack slot for the return address.
418 int NewFO = MF.getFrameInfo().CreateFixedObject(
419 SlotSize, (int64_t)FPDiff - SlotSize, false);
420
421 SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
422 // Store the return address to the appropriate stack slot.
423 Chain = DAG.getStore(
424 Chain, DL, RetFI, NewFI,
// NOTE(review): the final getStore argument line (rendered 425, presumably
// the MachinePointerInfo for the new slot) is missing from this extraction.
426 return Chain;
427}
428
// Lower a single incoming argument that was assigned to a stack location:
// create the fixed stack object and, for non-byval arguments, load it.
430M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
// NOTE(review): one parameter line (rendered 431, the `Ins` list) is missing
// from this extraction; `Ins` is used below.
432 const SDLoc &DL, SelectionDAG &DAG,
433 const CCValAssign &VA,
434 MachineFrameInfo &MFI,
435 unsigned ArgIdx) const {
436 // Create the nodes corresponding to a load from this parameter slot.
437 ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
438 EVT ValVT;
439
440 // If value is passed by pointer we have address passed instead of the value
441 // itself.
// NOTE(review): the condition line (rendered 442) selecting between these
// two branches is missing from this extraction; confirm against upstream.
443 ValVT = VA.getLocVT();
444 else
445 ValVT = VA.getValVT();
446
447 // Because we are dealing with BE architecture we need to offset loading of
448 // partial types
// i8/i16 values live in the high-address end of their 4-byte slot on this
// big-endian target, so bias the load offset accordingly.
449 int Offset = VA.getLocMemOffset();
450 if (VA.getValVT() == MVT::i8) {
451 Offset += 3;
452 } else if (VA.getValVT() == MVT::i16) {
453 Offset += 2;
454 }
455
456 // TODO Interrupt handlers
457 // Calculate SP offset of interrupt parameter, re-arrange the slot normally
458 // taken by a return address.
459
460 // FIXME For now, all byval parameter objects are marked mutable. This can
461 // be changed with more analysis. In case of tail call optimization mark all
462 // arguments mutable. Since they could be overwritten by lowering of arguments
463 // in case of a tail call.
464 bool AlwaysUseMutable = shouldGuaranteeTCO(
465 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
466 bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();
467
// Byval arguments: hand back the address of the stack object, no load.
468 if (Flags.isByVal()) {
469 unsigned Bytes = Flags.getByValSize();
470 if (Bytes == 0)
471 Bytes = 1; // Don't create zero-sized stack objects.
472 int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
473 // TODO Interrupt handlers
474 // Adjust SP offset of interrupt parameter.
475 return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
476 } else {
477 int FI =
478 MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);
479
480 // Set SExt or ZExt flag.
// Recorded so MatchingStackOffset can later verify extension agreement.
481 if (VA.getLocInfo() == CCValAssign::ZExt) {
482 MFI.setObjectZExt(FI, true);
483 } else if (VA.getLocInfo() == CCValAssign::SExt) {
484 MFI.setObjectSExt(FI, true);
485 }
486
487 // TODO Interrupt handlers
488 // Adjust SP offset of interrupt parameter.
489
490 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
491 SDValue Val = DAG.getLoad(
492 ValVT, DL, Chain, FIN,
// NOTE(review): the load's MachinePointerInfo argument line (rendered 493)
// is missing from this extraction.
// Extended-in-location values are truncated back to their declared type.
494 return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
495 : Val;
496 }
497}
498
// Emit the store (or byval copy) that places one outgoing call argument into
// its assigned slot in the outgoing-argument area addressed by `StackPtr`.
499SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
500 SDValue Arg, const SDLoc &DL,
501 SelectionDAG &DAG,
502 const CCValAssign &VA,
503 ISD::ArgFlagsTy Flags) const {
// Compute StackPtr + LocMemOffset, the argument's destination address.
504 unsigned LocMemOffset = VA.getLocMemOffset();
505 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
506 PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
507 StackPtr, PtrOff);
// Byval aggregates are copied wholesale; everything else is a plain store.
508 if (Flags.isByVal())
509 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);
510
511 return DAG.getStore(
512 Chain, DL, Arg, PtrOff,
// NOTE(review): the store's MachinePointerInfo argument line (rendered 513)
// is missing from this extraction.
514}
515
516//===----------------------------------------------------------------------===//
517// Call
518//===----------------------------------------------------------------------===//
519
// Lower an outgoing call: classify operands via CC_M68k, emit the
// CALLSEQ_START/END bracket, place register and stack arguments, handle
// sibcall/tail-call frame adjustment, and emit the CALL or TC_RETURN node.
// NOTE(review): this extraction is missing a number of rendered lines inside
// the function (e.g. 524/526, 533-534, 536, 574, 631/633, the switch case
// labels 654-668, 674, 686, 741/744/746-747, 755, 764, 789, 795-796,
// 800-801, 805, 809, 815, 844, 854); the notes below flag the spots that
// matter most. Confirm against upstream before editing logic here.
520SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
521 SmallVectorImpl<SDValue> &InVals) const {
// Unpack the CallLoweringInfo into locals.
522 SelectionDAG &DAG = CLI.DAG;
523 SDLoc &DL = CLI.DL;
525 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
527 SDValue Chain = CLI.Chain;
528 SDValue Callee = CLI.Callee;
529 CallingConv::ID CallConv = CLI.CallConv;
530 bool &IsTailCall = CLI.IsTailCall;
531 bool IsVarArg = CLI.IsVarArg;
532
535 bool IsSibcall = false;
537 // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
538
// Interrupt handlers cannot be targets of ordinary calls.
539 if (CallConv == CallingConv::M68k_INTR)
540 report_fatal_error("M68k interrupts may not be called directly");
541
542 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
543 if (Attr.getValueAsBool())
544 IsTailCall = false;
545
546 // FIXME Add tailcalls support
547
548 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
549 if (IsMustTail) {
550 // Force this to be a tail call. The verifier rules are enough to ensure
551 // that we can lower this successfully without moving the return address
552 // around.
553 IsTailCall = true;
554 } else if (IsTailCall) {
555 // Check if it's really possible to do a tail call.
556 IsTailCall = IsEligibleForTailCallOptimization(
557 Callee, CallConv, IsVarArg, SR != NotStructReturn,
558 MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
559 DAG);
560
561 // Sibcalls are automatically detected tailcalls which do not require
562 // ABI changes.
563 if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
564 IsSibcall = true;
565
566 if (IsTailCall)
567 ++NumTailCalls;
568 }
569
570 assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&
571 "Var args not supported with calling convention fastcc");
572
573 // Analyze operands of the call, assigning locations to each operand.
575 SmallVector<Type *, 4> ArgTypes;
576 for (const auto &Arg : CLI.getArgs())
577 ArgTypes.emplace_back(Arg.Ty);
578 M68kCCState CCInfo(ArgTypes, CallConv, IsVarArg, MF, ArgLocs,
579 *DAG.getContext());
580 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
581
582 // Get a count of how many bytes are to be pushed on the stack.
583 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
584 if (IsSibcall) {
585 // This is a sibcall. The memory operands are available in caller's
586 // own caller's stack.
587 NumBytes = 0;
588 } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
589 canGuaranteeTCO(CallConv)) {
590 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
591 }
592
// FPDiff is the caller-frame minus callee-frame size delta for guaranteed
// tail calls; it stays 0 for sibcalls and musttail.
593 int FPDiff = 0;
594 if (IsTailCall && !IsSibcall && !IsMustTail) {
595 // Lower arguments at fp - stackoffset + fpdiff.
596 unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();
597
598 FPDiff = NumBytesCallerPushed - NumBytes;
599
600 // Set the delta of movement of the returnaddr stackslot.
601 // But only set if delta is greater than previous delta.
602 if (FPDiff < MFI->getTCReturnAddrDelta())
603 MFI->setTCReturnAddrDelta(FPDiff);
604 }
605
606 unsigned NumBytesToPush = NumBytes;
607 unsigned NumBytesToPop = NumBytes;
608
609 // If we have an inalloca argument, all stack space has already been allocated
610 // for us and be right at the top of the stack. We don't support multiple
611 // arguments passed in memory when using inalloca.
612 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
613 NumBytesToPush = 0;
614 if (!ArgLocs.back().isMemLoc())
615 report_fatal_error("cannot use inalloca attribute on a register "
616 "parameter");
617 if (ArgLocs.back().getLocMemOffset() != 0)
618 report_fatal_error("any parameter with the inalloca attribute must be "
619 "the only memory argument");
620 }
621
622 if (!IsSibcall)
623 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
624 NumBytes - NumBytesToPush, DL);
625
626 SDValue RetFI;
627 // Load return address for tail calls.
628 if (IsTailCall && FPDiff)
629 Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);
630
632 SmallVector<SDValue, 8> MemOpChains;
634
635 // Walk the register/memloc assignments, inserting copies/loads. In the case
636 // of tail call optimization arguments are handle later.
637 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
638 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
639 ISD::ArgFlagsTy Flags = Outs[i].Flags;
640
641 // Skip inalloca arguments, they have already been written.
642 if (Flags.isInAlloca())
643 continue;
644
645 CCValAssign &VA = ArgLocs[i];
646 EVT RegVT = VA.getLocVT();
647 SDValue Arg = OutVals[i];
648 bool IsByVal = Flags.isByVal();
649
650 // Promote the value if needed.
// NOTE(review): the `case CCValAssign::...:` labels for this switch
// (rendered lines 654/656/659/662/665/668) are missing from this
// extraction; the extend/bitcast/spill bodies below are intact.
651 switch (VA.getLocInfo()) {
652 default:
653 llvm_unreachable("Unknown loc info!");
655 break;
657 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
658 break;
660 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
661 break;
663 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
664 break;
666 Arg = DAG.getBitcast(RegVT, Arg);
667 break;
669 // Store the argument.
670 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
671 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
672 Chain = DAG.getStore(
673 Chain, DL, Arg, SpillSlot,
675 Arg = SpillSlot;
676 break;
677 }
678 }
679
680 if (VA.isRegLoc()) {
681 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
682 } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
683 assert(VA.isMemLoc());
684 if (!StackPtr.getNode()) {
685 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
687 }
688 MemOpChains.push_back(
689 LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
690 }
691 }
692
693 if (!MemOpChains.empty())
694 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
695
696 // FIXME Make sure PIC style GOT works as expected
697 // The only time GOT is really needed is for Medium-PIC static data
698 // otherwise we are happy with pc-rel or static references
699
// musttail varargs: re-forward the registers captured at function entry.
700 if (IsVarArg && IsMustTail) {
701 const auto &Forwards = MFI->getForwardedMustTailRegParms();
702 for (const auto &F : Forwards) {
703 SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
704 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
705 }
706 }
707
708 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
709 // don't need this because the eligibility check rejects calls that require
710 // shuffling arguments passed in memory.
711 if (!IsSibcall && IsTailCall) {
712 // Force all the incoming stack arguments to be loaded from the stack
713 // before any new outgoing arguments are stored to the stack, because the
714 // outgoing stack slots may alias the incoming argument stack slots, and
715 // the alias isn't otherwise explicit. This is slightly more conservative
716 // than necessary, because it means that each store effectively depends
717 // on every argument instead of just those arguments it would clobber.
718 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
719
720 SmallVector<SDValue, 8> MemOpChains2;
721 SDValue FIN;
722 int FI = 0;
723 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
724 CCValAssign &VA = ArgLocs[i];
725 if (VA.isRegLoc())
726 continue;
727 assert(VA.isMemLoc());
728 SDValue Arg = OutVals[i];
729 ISD::ArgFlagsTy Flags = Outs[i].Flags;
730 // Skip inalloca arguments. They don't require any work.
731 if (Flags.isInAlloca())
732 continue;
733 // Create frame index.
734 int32_t Offset = VA.getLocMemOffset() + FPDiff;
735 uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
736 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
737 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
738
739 if (Flags.isByVal()) {
740 // Copy relative to framepointer.
// NOTE(review): lines 741 and 744-748 are partially missing here,
// including the computation of `Source` used two lines below.
742 if (!StackPtr.getNode()) {
743 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
745 }
748
749 MemOpChains2.push_back(
750 CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
751 } else {
752 // Store relative to framepointer.
753 MemOpChains2.push_back(DAG.getStore(
754 ArgChain, DL, Arg, FIN,
756 }
757 }
758
759 if (!MemOpChains2.empty())
760 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);
761
762 // Store the return address to the appropriate stack slot.
763 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
765 Subtarget.getSlotSize(), FPDiff, DL);
766 }
767
768 // Build a sequence of copy-to-reg nodes chained together with token chain
769 // and flag operands which copy the outgoing args into registers.
770 SDValue InGlue;
771 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
772 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
773 RegsToPass[i].second, InGlue);
774 InGlue = Chain.getValue(1);
775 }
776
777 if (Callee->getOpcode() == ISD::GlobalAddress) {
778 // If the callee is a GlobalAddress node (quite common, every direct call
779 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
780 // it.
781 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
782
783 // We should use extra load for direct calls to dllimported functions in
784 // non-JIT mode.
785 const GlobalValue *GV = G->getGlobal();
786 if (!GV->hasDLLImportStorageClass()) {
787 unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
788
790 GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
791
792 if (OpFlags == M68kII::MO_GOTPCREL) {
793
794 // Add a wrapper.
797
798 // Add extra indirection
799 Callee = DAG.getLoad(
802 }
803 }
804 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
806 unsigned char OpFlags =
807 Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
808
810 S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
811 }
812
813 // Returns a chain & a flag for retval copy to use.
814 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
816
// For a guaranteed tail call, close the call sequence before emitting the
// TC_RETURN operand list.
817 if (!IsSibcall && IsTailCall) {
818 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, DL);
819 InGlue = Chain.getValue(1);
820 }
821
822 Ops.push_back(Chain);
823 Ops.push_back(Callee);
824
825 if (IsTailCall)
826 Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));
827
828 // Add argument registers to the end of the list so that they are known live
829 // into the call.
830 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
831 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
832 RegsToPass[i].second.getValueType()));
833
834 // Add a register mask operand representing the call-preserved registers.
835 const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
836 assert(Mask && "Missing call preserved mask for calling convention");
837
838 Ops.push_back(DAG.getRegisterMask(Mask));
839
840 if (InGlue.getNode())
841 Ops.push_back(InGlue);
842
843 if (IsTailCall) {
845 return DAG.getNode(M68kISD::TC_RETURN, DL, NodeTys, Ops);
846 }
847
848 Chain = DAG.getNode(M68kISD::CALL, DL, NodeTys, Ops);
849 InGlue = Chain.getValue(1);
850
851 // Create the CALLSEQ_END node.
852 unsigned NumBytesForCalleeToPop;
853 if (M68k::isCalleePop(CallConv, IsVarArg,
855 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
856 } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
857 // If this is a call to a struct-return function, the callee
858 // pops the hidden struct pointer, so we have to push it back.
859 NumBytesForCalleeToPop = 4;
860 } else {
861 NumBytesForCalleeToPop = 0; // Callee pops nothing.
862 }
863
864 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
865 // No need to reset the stack after the call if the call doesn't return. To
866 // make the MI verify, we'll pretend the callee does it for us.
867 NumBytesForCalleeToPop = NumBytes;
868 }
869
870 // Returns a flag for retval copy to use.
871 if (!IsSibcall) {
872 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
873 InGlue, DL);
874 InGlue = Chain.getValue(1);
875 }
876
877 // Handle result values, copying them out of physregs into vregs that we
878 // return.
879 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
880 InVals);
881}
882
// Copy the call's return values out of their assigned physical registers
// (per RetCC_M68k) into SDValues appended to `InVals`; returns the updated
// chain.
883SDValue M68kTargetLowering::LowerCallResult(
884 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
885 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
886 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
887
888 // Assign locations to each value returned by this call.
// NOTE(review): the declaration of `RVLocs` (rendered line 889) is missing
// from this extraction.
890 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
891 *DAG.getContext());
892 CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);
893
894 // Copy all of the result registers out of their specified physreg.
895 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
896 CCValAssign &VA = RVLocs[i];
897 EVT CopyVT = VA.getLocVT();
898
// The CopyFromReg node produces (value, chain, glue). We keep result #1
// (the chain) in `Chain`, then read the copied value (#0) and the glue
// (#2) off that same node below.
900 Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InGlue)
901 .getValue(1);
902 SDValue Val = Chain.getValue(0);
903
// i1 results arrive extended in their location; truncate back to i1.
904 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
905 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
906
907 InGlue = Chain.getValue(2);
908 InVals.push_back(Val);
909 }
910
911 return Chain;
912}
913
914//===----------------------------------------------------------------------===//
915// Formal Arguments Calling Convention Implementation
916//===----------------------------------------------------------------------===//
917
// Lower the function's incoming arguments: classify them via CC_M68k, copy
// register arguments out of their physregs, create stack objects / loads for
// memory arguments, record sret and vararg bookkeeping, and set the
// callee-pop byte count.
// NOTE(review): this extraction is missing several rendered lines (922-923,
// 929, 979, 999, 1010, 1028-1029, 1036, 1043, 1048), including the
// definitions of `MF`, `MMFI`, `ArgLocs` and `Forwards`, plus a few guarding
// conditions flagged below. Confirm against upstream before editing logic.
918SDValue M68kTargetLowering::LowerFormalArguments(
919 SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
920 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
921 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
924 // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();
925
926 MachineFrameInfo &MFI = MF.getFrameInfo();
927
928 // Assign locations to all of the incoming arguments.
930 SmallVector<Type *, 4> ArgTypes;
931 for (const Argument &Arg : MF.getFunction().args())
932 ArgTypes.emplace_back(Arg.getType());
933 M68kCCState CCInfo(ArgTypes, CCID, IsVarArg, MF, ArgLocs, *DAG.getContext());
934
935 CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);
936
937 unsigned LastVal = ~0U;
938 SDValue ArgValue;
939 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
940 CCValAssign &VA = ArgLocs[i];
941 assert(VA.getValNo() != LastVal && "Same value in different locations");
942
943 LastVal = VA.getValNo();
944
945 if (VA.isRegLoc()) {
946 EVT RegVT = VA.getLocVT();
947 const TargetRegisterClass *RC;
// Only i32 register arguments are expected here; CC_M68k promotes
// narrower values (see the assert-ext handling below).
948 if (RegVT == MVT::i32)
949 RC = &M68k::XR32RegClass;
950 else
951 llvm_unreachable("Unknown argument type!");
952
953 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
954 ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
955
956 // If this is an 8 or 16-bit value, it is really passed promoted to 32
957 // bits. Insert an assert[sz]ext to capture this, then truncate to the
958 // right size.
959 if (VA.getLocInfo() == CCValAssign::SExt) {
960 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
961 DAG.getValueType(VA.getValVT()));
962 } else if (VA.getLocInfo() == CCValAssign::ZExt) {
963 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
964 DAG.getValueType(VA.getValVT()));
965 } else if (VA.getLocInfo() == CCValAssign::BCvt) {
966 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
967 }
968
969 if (VA.isExtInLoc()) {
970 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
971 }
972 } else {
973 assert(VA.isMemLoc());
974 ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
975 }
976
977 // If value is passed via pointer - do a load.
978 // TODO Make sure this handling on indirect arguments is correct
// NOTE(review): the condition guarding this load (rendered line 979,
// presumably an Indirect-location check) is missing from this extraction.
980 ArgValue =
981 DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());
982
983 InVals.push_back(ArgValue);
984 }
985
986 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
987 // Swift calling convention does not require we copy the sret argument
988 // into %D0 for the return. We don't set SRetReturnReg for Swift.
989 if (CCID == CallingConv::Swift)
990 continue;
991
992 // ABI require that for returning structs by value we copy the sret argument
993 // into %D0 for the return. Save the argument into a virtual register so
994 // that we can access it from the return points.
995 if (Ins[i].Flags.isSRet()) {
996 unsigned Reg = MMFI->getSRetReturnReg();
997 if (!Reg) {
998 MVT PtrTy = getPointerTy(DAG.getDataLayout());
// NOTE(review): the line creating the virtual register (rendered 999)
// is missing from this extraction.
1000 MMFI->setSRetReturnReg(Reg);
1001 }
1002 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
1003 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
// At most one sret argument exists, so stop after handling it.
1004 break;
1005 }
1006 }
1007
1008 unsigned StackSize = CCInfo.getStackSize();
1009 // Align stack specially for tail calls.
// NOTE(review): the condition selecting this realignment (rendered line
// 1010) is missing from this extraction.
1011 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
1012
1013 // If the function takes variable number of arguments, make a frame index for
1014 // the start of the first vararg value... for expansion of llvm.va_start. We
1015 // can skip this if there are no va_start calls.
1016 if (MFI.hasVAStart()) {
1017 MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
1018 }
1019
1020 if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
1021 // We forward some GPRs and some vector types.
1022 SmallVector<MVT, 2> RegParmTypes;
1023 MVT IntVT = MVT::i32;
1024 RegParmTypes.push_back(IntVT);
1025
1026 // Compute the set of forwarded registers. The rest are scratch.
1030 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);
1031
1032 // Copy all forwards from physical to virtual registers.
1033 for (ForwardedRegister &F : Forwards) {
1034 // FIXME Can we use a less constrained schedule?
1035 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
// NOTE(review): a line is missing here (rendered 1036, presumably the
// creation of the virtual register assigned to F.VReg).
1037 Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
1038 }
1039 }
1040
1041 // Some CCs need callee pop.
1042 if (M68k::isCalleePop(CCID, IsVarArg,
1044 MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
1045 } else {
1046 MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
1047 // If this is an sret function, the return should pop the hidden pointer.
// NOTE(review): the condition guarding the 4-byte sret pop (rendered
// line 1048) is missing from this extraction.
1049 MMFI->setBytesToPopOnReturn(4);
1050 }
1051
1052 MMFI->setArgumentStackSize(StackSize);
1053
1054 return Chain;
1055}
1056
1057//===----------------------------------------------------------------------===//
1058// Return Value Calling Convention Implementation
1059//===----------------------------------------------------------------------===//
1060
// Check whether the function's return values can all be lowered under
// RetCC_M68k (i.e. fit the return-register constraints of the convention).
1061bool M68kTargetLowering::CanLowerReturn(
1062 CallingConv::ID CCID, MachineFunction &MF, bool IsVarArg,
1063 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
// NOTE(review): the declaration of `RVLocs` (rendered line 1064) is missing
// from this extraction.
1065 CCState CCInfo(CCID, IsVarArg, MF, RVLocs, Context);
1066 return CCInfo.CheckReturn(Outs, RetCC_M68k);
1067}
1068
1069SDValue
1070M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
1071 bool IsVarArg,
1073 const SmallVectorImpl<SDValue> &OutVals,
1074 const SDLoc &DL, SelectionDAG &DAG) const {
1077
1079 CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
1080 CCInfo.AnalyzeReturn(Outs, RetCC_M68k);
1081
1082 SDValue Glue;
1084 // Operand #0 = Chain (updated below)
1085 RetOps.push_back(Chain);
1086 // Operand #1 = Bytes To Pop
1087 RetOps.push_back(
1088 DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32));
1089
1090 // Copy the result values into the output registers.
1091 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1092 CCValAssign &VA = RVLocs[i];
1093 assert(VA.isRegLoc() && "Can only return in registers!");
1094 SDValue ValToCopy = OutVals[i];
1095 EVT ValVT = ValToCopy.getValueType();
1096
1097 // Promote values to the appropriate types.
1098 if (VA.getLocInfo() == CCValAssign::SExt)
1099 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1100 else if (VA.getLocInfo() == CCValAssign::ZExt)
1101 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
1102 else if (VA.getLocInfo() == CCValAssign::AExt) {
1103 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
1104 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1105 else
1106 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
1107 } else if (VA.getLocInfo() == CCValAssign::BCvt)
1108 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
1109
1110 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Glue);
1111 Glue = Chain.getValue(1);
1112 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1113 }
1114
1115 // Swift calling convention does not require we copy the sret argument
1116 // into %d0 for the return, and SRetReturnReg is not set for Swift.
1117
1118 // ABI require that for returning structs by value we copy the sret argument
1119 // into %D0 for the return. Save the argument into a virtual register so that
1120 // we can access it from the return points.
1121 //
1122 // Checking Function.hasStructRetAttr() here is insufficient because the IR
1123 // may not have an explicit sret argument. If MFI.CanLowerReturn is
1124 // false, then an sret argument may be implicitly inserted in the SelDAG. In
1125 // either case MFI->setSRetReturnReg() will have been called.
1126 if (unsigned SRetReg = MFI->getSRetReturnReg()) {
1127 // ??? Can i just move this to the top and escape this explanation?
1128 // When we have both sret and another return value, we should use the
1129 // original Chain stored in RetOps[0], instead of the current Chain updated
1130 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
1131
1132 // For the case of sret and another return value, we have
1133 // Chain_0 at the function entry
1134 // Chain_1 = getCopyToReg(Chain_0) in the above loop
1135 // If we use Chain_1 in getCopyFromReg, we will have
1136 // Val = getCopyFromReg(Chain_1)
1137 // Chain_2 = getCopyToReg(Chain_1, Val) from below
1138
1139 // getCopyToReg(Chain_0) will be glued together with
1140 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
1141 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
1142 // Data dependency from Unit B to Unit A due to usage of Val in
1143 // getCopyToReg(Chain_1, Val)
1144 // Chain dependency from Unit A to Unit B
1145
1146 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
1147 SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
1149
1150 // ??? How will this work if CC does not use registers for args passing?
1151 // ??? What if I return multiple structs?
1152 unsigned RetValReg = M68k::D0;
1153 Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Glue);
1154 Glue = Chain.getValue(1);
1155
1156 RetOps.push_back(
1157 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
1158 }
1159
1160 RetOps[0] = Chain; // Update chain.
1161
1162 // Add the glue if we have it.
1163 if (Glue.getNode())
1164 RetOps.push_back(Glue);
1165
1166 return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
1167}
1168
1169//===----------------------------------------------------------------------===//
1170// Fast Calling Convention (tail call) implementation
1171//===----------------------------------------------------------------------===//
1172
// Like the standard ("std") call convention the callee cleans the arguments,
// except that one register is reserved for storing the tail-called function
// address. NOTE(review): the prose and register names below were inherited
// from the X86 backend (ECX/EBP/ESI/EDI); on M68k the call address can only
// target %a0 or %a1 -- see IsEligibleForTailCallOptimization. Only 2
// registers are free for argument passing (inreg). Tail call optimization is
// performed provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// With GOT-style position-independent code, only local (within module) calls
// are supported at the moment. To keep the stack aligned according to the
// platform ABI, the function GetAlignedArgumentStackSize ensures that the
// argument delta is always a multiple of the stack alignment. (Dynamic
// linkers need this - darwin's dyld for example.) If a tail-called callee
// has more arguments than the caller, the caller needs to make sure that
// there is room to move the RETADDR to. This is achieved by reserving an
// area the size of the argument delta right after the original RETADDR, but
// before the saved frame pointer or the spilled registers, e.g. caller(arg1,
// arg2) calls callee(arg1, arg2, arg3, arg4), stack layout:
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR
//     move area ]
//   (possible EBP)
//   ESI
//   EDI
//   local1 ..
1198
1199/// Make the stack size align e.g 16n + 12 aligned for a 16-byte align
1200/// requirement.
1201unsigned
1202M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1203 SelectionDAG &DAG) const {
1204 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
1205 unsigned StackAlignment = TFI.getStackAlignment();
1206 uint64_t AlignMask = StackAlignment - 1;
1207 int64_t Offset = StackSize;
1208 unsigned SlotSize = Subtarget.getSlotSize();
1209 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1210 // Number smaller than 12 so just add the difference.
1211 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1212 } else {
1213 // Mask out lower bits, add stackalignment once plus the 12 bytes.
1214 Offset =
1215 ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
1216 }
1217 return Offset;
1218}
1219
1220/// Check whether the call is eligible for tail call optimization. Targets
1221/// that want to do tail call optimization should implement this function.
1222bool M68kTargetLowering::IsEligibleForTailCallOptimization(
1223 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
1224 bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
1226 const SmallVectorImpl<SDValue> &OutVals,
1227 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
1228 if (!mayTailCallThisCC(CalleeCC))
1229 return false;
1230
1231 // If -tailcallopt is specified, make fastcc functions tail-callable.
1233 const auto &CallerF = MF.getFunction();
1234
1235 CallingConv::ID CallerCC = CallerF.getCallingConv();
1236 bool CCMatch = CallerCC == CalleeCC;
1237
1239 if (canGuaranteeTCO(CalleeCC) && CCMatch)
1240 return true;
1241 return false;
1242 }
1243
1244 // Look for obvious safe cases to perform tail call optimization that do not
1245 // require ABI changes. This is what gcc calls sibcall.
1246
1247 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
1248 // emit a special epilogue.
1249 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1250 if (RegInfo->hasStackRealignment(MF))
1251 return false;
1252
1253 // Also avoid sibcall optimization if either caller or callee uses struct
1254 // return semantics.
1255 if (IsCalleeStructRet || IsCallerStructRet)
1256 return false;
1257
1258 // Do not sibcall optimize vararg calls unless all arguments are passed via
1259 // registers.
1260 LLVMContext &C = *DAG.getContext();
1261 if (IsVarArg && !Outs.empty()) {
1262
1264 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1265
1266 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1267 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1268 if (!ArgLocs[i].isRegLoc())
1269 return false;
1270 }
1271
1272 // Check that the call results are passed in the same way.
1273 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
1274 RetCC_M68k))
1275 return false;
1276
1277 // The callee has to preserve all registers the caller needs to preserve.
1278 const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
1279 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1280 if (!CCMatch) {
1281 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1282 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1283 return false;
1284 }
1285
1286 unsigned StackArgsSize = 0;
1287
1288 // If the callee takes no arguments then go on to check the results of the
1289 // call.
1290 if (!Outs.empty()) {
1291 // Check if stack adjustment is needed. For now, do not do this if any
1292 // argument is passed on the stack.
1294 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1295
1296 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1297 StackArgsSize = CCInfo.getStackSize();
1298
1299 if (StackArgsSize) {
1300 // Check if the arguments are already laid out in the right way as
1301 // the caller's fixed stack objects.
1302 MachineFrameInfo &MFI = MF.getFrameInfo();
1303 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1304 const M68kInstrInfo *TII = Subtarget.getInstrInfo();
1305 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1306 CCValAssign &VA = ArgLocs[i];
1307 SDValue Arg = OutVals[i];
1308 ISD::ArgFlagsTy Flags = Outs[i].Flags;
1310 return false;
1311 if (!VA.isRegLoc()) {
1312 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
1313 TII, VA))
1314 return false;
1315 }
1316 }
1317 }
1318
1319 bool PositionIndependent = isPositionIndependent();
1320 // If the tailcall address may be in a register, then make sure it's
1321 // possible to register allocate for it. The call address can
1322 // only target %A0 or %A1 since the tail call must be scheduled after
1323 // callee-saved registers are restored. These happen to be the same
1324 // registers used to pass 'inreg' arguments so watch out for those.
1325 if ((!isa<GlobalAddressSDNode>(Callee) &&
1326 !isa<ExternalSymbolSDNode>(Callee)) ||
1327 PositionIndependent) {
1328 unsigned NumInRegs = 0;
1329 // In PIC we need an extra register to formulate the address computation
1330 // for the callee.
1331 unsigned MaxInRegs = PositionIndependent ? 1 : 2;
1332
1333 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1334 CCValAssign &VA = ArgLocs[i];
1335 if (!VA.isRegLoc())
1336 continue;
1337 Register Reg = VA.getLocReg();
1338 switch (Reg) {
1339 default:
1340 break;
1341 case M68k::A0:
1342 case M68k::A1:
1343 if (++NumInRegs == MaxInRegs)
1344 return false;
1345 break;
1346 }
1347 }
1348 }
1349
1350 const MachineRegisterInfo &MRI = MF.getRegInfo();
1351 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
1352 return false;
1353 }
1354
1355 bool CalleeWillPop = M68k::isCalleePop(
1356 CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);
1357
1358 if (unsigned BytesToPop =
1360 // If we have bytes to pop, the callee must pop them.
1361 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
1362 if (!CalleePopMatches)
1363 return false;
1364 } else if (CalleeWillPop && StackArgsSize > 0) {
1365 // If we don't have bytes to pop, make sure the callee doesn't pop any.
1366 return false;
1367 }
1368
1369 return true;
1370}
1371
1372//===----------------------------------------------------------------------===//
1373// Custom Lower
1374//===----------------------------------------------------------------------===//
1375
1377 SelectionDAG &DAG) const {
1378 switch (Op.getOpcode()) {
1379 default:
1380 llvm_unreachable("Should not custom lower this!");
1381 case ISD::SADDO:
1382 case ISD::UADDO:
1383 case ISD::SSUBO:
1384 case ISD::USUBO:
1385 case ISD::SMULO:
1386 case ISD::UMULO:
1387 return LowerXALUO(Op, DAG);
1388 case ISD::SETCC:
1389 return LowerSETCC(Op, DAG);
1390 case ISD::SETCCCARRY:
1391 return LowerSETCCCARRY(Op, DAG);
1392 case ISD::SELECT:
1393 return LowerSELECT(Op, DAG);
1394 case ISD::BRCOND:
1395 return LowerBRCOND(Op, DAG);
1396 case ISD::ADDC:
1397 case ISD::ADDE:
1398 case ISD::SUBC:
1399 case ISD::SUBE:
1400 return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
1401 case ISD::ConstantPool:
1402 return LowerConstantPool(Op, DAG);
1403 case ISD::GlobalAddress:
1404 return LowerGlobalAddress(Op, DAG);
1406 return LowerExternalSymbol(Op, DAG);
1407 case ISD::BlockAddress:
1408 return LowerBlockAddress(Op, DAG);
1409 case ISD::JumpTable:
1410 return LowerJumpTable(Op, DAG);
1411 case ISD::VASTART:
1412 return LowerVASTART(Op, DAG);
1414 return LowerDYNAMIC_STACKALLOC(Op, DAG);
1415 case ISD::SHL_PARTS:
1416 return LowerShiftLeftParts(Op, DAG);
1417 case ISD::SRA_PARTS:
1418 return LowerShiftRightParts(Op, DAG, true);
1419 case ISD::SRL_PARTS:
1420 return LowerShiftRightParts(Op, DAG, false);
1421 case ISD::ATOMIC_FENCE:
1422 return LowerATOMICFENCE(Op, DAG);
1423 }
1424}
1425
1426bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
1427 SDValue C) const {
1428 // Shifts and add instructions in M68000 and M68010 support
1429 // up to 32 bits, but mul only has 16-bit variant. So it's almost
1430 // certainly beneficial to lower 8/16/32-bit mul to their
1431 // add / shifts counterparts. But for 64-bits mul, it might be
1432 // safer to just leave it to compiler runtime implementations.
1433 return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
1434}
1435
1436SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
1437 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
1438 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
1439 // looks for this combo and may remove the "setcc" instruction if the "setcc"
1440 // has only one use.
1441 SDNode *N = Op.getNode();
1442 SDValue LHS = N->getOperand(0);
1443 SDValue RHS = N->getOperand(1);
1444 unsigned BaseOp = 0;
1445 unsigned Cond = 0;
1446 SDLoc DL(Op);
1447 switch (Op.getOpcode()) {
1448 default:
1449 llvm_unreachable("Unknown ovf instruction!");
1450 case ISD::SADDO:
1451 BaseOp = M68kISD::ADD;
1453 break;
1454 case ISD::UADDO:
1455 BaseOp = M68kISD::ADD;
1457 break;
1458 case ISD::SSUBO:
1459 BaseOp = M68kISD::SUB;
1461 break;
1462 case ISD::USUBO:
1463 BaseOp = M68kISD::SUB;
1465 break;
1466 }
1467
1468 // Also sets CCR.
1469 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i8);
1470 SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
1471 SDValue SetCC = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
1472 DAG.getConstant(Cond, DL, MVT::i8),
1473 SDValue(Arith.getNode(), 1));
1474
1475 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Arith, SetCC);
1476}
1477
1478/// Create a BTST (Bit Test) node - Test bit \p BitNo in \p Src and set
1479/// condition according to equal/not-equal condition code \p CC.
1481 const SDLoc &DL, SelectionDAG &DAG) {
1482 // If Src is i8, promote it to i32 with any_extend. There is no i8 BTST
1483 // instruction. Since the shift amount is in-range-or-undefined, we know
1484 // that doing a bittest on the i32 value is ok.
1485 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
1486 Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
1487
1488 // If the operand types disagree, extend the shift amount to match. Since
1489 // BTST ignores high bits (like shifts) we can use anyextend.
1490 if (Src.getValueType() != BitNo.getValueType())
1491 BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
1492
1493 SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo);
1494
1495 // NOTE BTST sets CCR.Z flag
1497 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
1498 DAG.getConstant(Cond, DL, MVT::i8), BTST);
1499}
1500
1501/// Result of 'and' is compared against zero. Change to a BTST node if possible.
1503 SelectionDAG &DAG) {
1504 SDValue Op0 = And.getOperand(0);
1505 SDValue Op1 = And.getOperand(1);
1506 if (Op0.getOpcode() == ISD::TRUNCATE)
1507 Op0 = Op0.getOperand(0);
1508 if (Op1.getOpcode() == ISD::TRUNCATE)
1509 Op1 = Op1.getOperand(0);
1510
1511 SDValue LHS, RHS;
1512 if (Op1.getOpcode() == ISD::SHL)
1513 std::swap(Op0, Op1);
1514 if (Op0.getOpcode() == ISD::SHL) {
1515 if (isOneConstant(Op0.getOperand(0))) {
1516 // If we looked past a truncate, check that it's only truncating away
1517 // known zeros.
1518 unsigned BitWidth = Op0.getValueSizeInBits();
1519 unsigned AndBitWidth = And.getValueSizeInBits();
1520 if (BitWidth > AndBitWidth) {
1521 auto Known = DAG.computeKnownBits(Op0);
1522 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
1523 return SDValue();
1524 }
1525 LHS = Op1;
1526 RHS = Op0.getOperand(1);
1527 }
1528 } else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
1529 uint64_t AndRHSVal = AndRHS->getZExtValue();
1530 SDValue AndLHS = Op0;
1531
1532 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
1533 LHS = AndLHS.getOperand(0);
1534 RHS = AndLHS.getOperand(1);
1535 }
1536
1537 // Use BTST if the immediate can't be encoded in a TEST instruction.
1538 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
1539 LHS = AndLHS;
1540 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
1541 }
1542 }
1543
1544 if (LHS.getNode())
1545 return getBitTestCondition(LHS, RHS, CC, DL, DAG);
1546
1547 return SDValue();
1548}
1549
1551 switch (SetCCOpcode) {
1552 default:
1553 llvm_unreachable("Invalid integer condition!");
1554 case ISD::SETEQ:
1555 return M68k::COND_EQ;
1556 case ISD::SETGT:
1557 return M68k::COND_GT;
1558 case ISD::SETGE:
1559 return M68k::COND_GE;
1560 case ISD::SETLT:
1561 return M68k::COND_LT;
1562 case ISD::SETLE:
1563 return M68k::COND_LE;
1564 case ISD::SETNE:
1565 return M68k::COND_NE;
1566 case ISD::SETULT:
1567 return M68k::COND_CS;
1568 case ISD::SETUGE:
1569 return M68k::COND_CC;
1570 case ISD::SETUGT:
1571 return M68k::COND_HI;
1572 case ISD::SETULE:
1573 return M68k::COND_LS;
1574 }
1575}
1576
1577/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific
1578/// condition code, returning the condition code and the LHS/RHS of the
1579/// comparison to make.
1580static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
1581 bool IsFP, SDValue &LHS, SDValue &RHS,
1582 SelectionDAG &DAG) {
1583 if (!IsFP) {
1584 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1585 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
1586 // X > -1 -> X == 0, jump !sign.
1587 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1588 return M68k::COND_PL;
1589 }
1590 if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
1591 // X < 0 -> X == 0, jump on sign.
1592 return M68k::COND_MI;
1593 }
1594 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
1595 // X < 1 -> X <= 0
1596 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1597 return M68k::COND_LE;
1598 }
1599 }
1600
1601 return TranslateIntegerM68kCC(SetCCOpcode);
1602 }
1603
1604 // First determine if it is required or is profitable to flip the operands.
1605
1606 // If LHS is a foldable load, but RHS is not, flip the condition.
1607 if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
1608 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
1609 std::swap(LHS, RHS);
1610 }
1611
1612 switch (SetCCOpcode) {
1613 default:
1614 break;
1615 case ISD::SETOLT:
1616 case ISD::SETOLE:
1617 case ISD::SETUGT:
1618 case ISD::SETUGE:
1619 std::swap(LHS, RHS);
1620 break;
1621 }
1622
1623 // On a floating point condition, the flags are set as follows:
1624 // ZF PF CF op
1625 // 0 | 0 | 0 | X > Y
1626 // 0 | 0 | 1 | X < Y
1627 // 1 | 0 | 0 | X == Y
1628 // 1 | 1 | 1 | unordered
1629 switch (SetCCOpcode) {
1630 default:
1631 llvm_unreachable("Condcode should be pre-legalized away");
1632 case ISD::SETUEQ:
1633 case ISD::SETEQ:
1634 return M68k::COND_EQ;
1635 case ISD::SETOLT: // flipped
1636 case ISD::SETOGT:
1637 case ISD::SETGT:
1638 return M68k::COND_HI;
1639 case ISD::SETOLE: // flipped
1640 case ISD::SETOGE:
1641 case ISD::SETGE:
1642 return M68k::COND_CC;
1643 case ISD::SETUGT: // flipped
1644 case ISD::SETULT:
1645 case ISD::SETLT:
1646 return M68k::COND_CS;
1647 case ISD::SETUGE: // flipped
1648 case ISD::SETULE:
1649 case ISD::SETLE:
1650 return M68k::COND_LS;
1651 case ISD::SETONE:
1652 case ISD::SETNE:
1653 return M68k::COND_NE;
1654 case ISD::SETOEQ:
1655 case ISD::SETUNE:
1656 return M68k::COND_INVALID;
1657 }
1658}
1659
1660// Convert (truncate (srl X, N) to i1) to (bt X, N)
1662 const SDLoc &DL, SelectionDAG &DAG) {
1663
1664 assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
1665 "Expected TRUNCATE to i1 node");
1666
1667 if (Op.getOperand(0).getOpcode() != ISD::SRL)
1668 return SDValue();
1669
1670 SDValue ShiftRight = Op.getOperand(0);
1671 return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
1672 CC, DL, DAG);
1673}
1674
1675/// \brief return true if \c Op has a use that doesn't just read flags.
1676static bool hasNonFlagsUse(SDValue Op) {
1677 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
1678 ++UI) {
1679 SDNode *User = *UI;
1680 unsigned UOpNo = UI.getOperandNo();
1681 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
1682 // Look pass truncate.
1683 UOpNo = User->use_begin().getOperandNo();
1684 User = *User->use_begin();
1685 }
1686
1687 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
1688 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
1689 return true;
1690 }
1691 return false;
1692}
1693
1694SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
1695 const SDLoc &DL, SelectionDAG &DAG) const {
1696
1697 // CF and OF aren't always set the way we want. Determine which
1698 // of these we need.
1699 bool NeedCF = false;
1700 bool NeedOF = false;
1701 switch (M68kCC) {
1702 default:
1703 break;
1704 case M68k::COND_HI:
1705 case M68k::COND_CC:
1706 case M68k::COND_CS:
1707 case M68k::COND_LS:
1708 NeedCF = true;
1709 break;
1710 case M68k::COND_GT:
1711 case M68k::COND_GE:
1712 case M68k::COND_LT:
1713 case M68k::COND_LE:
1714 case M68k::COND_VS:
1715 case M68k::COND_VC: {
1716 // Check if we really need to set the
1717 // Overflow flag. If NoSignedWrap is present
1718 // that is not actually needed.
1719 switch (Op->getOpcode()) {
1720 case ISD::ADD:
1721 case ISD::SUB:
1722 case ISD::MUL:
1723 case ISD::SHL: {
1724 if (Op.getNode()->getFlags().hasNoSignedWrap())
1725 break;
1726 [[fallthrough]];
1727 }
1728 default:
1729 NeedOF = true;
1730 break;
1731 }
1732 break;
1733 }
1734 }
1735 // See if we can use the CCR value from the operand instead of
1736 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
1737 // we prove that the arithmetic won't overflow, we can't use OF or CF.
1738 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
1739 // Emit a CMP with 0, which is the TEST pattern.
1740 return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
1741 DAG.getConstant(0, DL, Op.getValueType()), Op);
1742 }
1743 unsigned Opcode = 0;
1744 unsigned NumOperands = 0;
1745
1746 // Truncate operations may prevent the merge of the SETCC instruction
1747 // and the arithmetic instruction before it. Attempt to truncate the operands
1748 // of the arithmetic instruction and use a reduced bit-width instruction.
1749 bool NeedTruncation = false;
1750 SDValue ArithOp = Op;
1751 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
1752 SDValue Arith = Op->getOperand(0);
1753 // Both the trunc and the arithmetic op need to have one user each.
1754 if (Arith->hasOneUse())
1755 switch (Arith.getOpcode()) {
1756 default:
1757 break;
1758 case ISD::ADD:
1759 case ISD::SUB:
1760 case ISD::AND:
1761 case ISD::OR:
1762 case ISD::XOR: {
1763 NeedTruncation = true;
1764 ArithOp = Arith;
1765 }
1766 }
1767 }
1768
1769 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
1770 // which may be the result of a CAST. We use the variable 'Op', which is the
1771 // non-casted variable when we check for possible users.
1772 switch (ArithOp.getOpcode()) {
1773 case ISD::ADD:
1774 Opcode = M68kISD::ADD;
1775 NumOperands = 2;
1776 break;
1777 case ISD::SHL:
1778 case ISD::SRL:
1779 // If we have a constant logical shift that's only used in a comparison
1780 // against zero turn it into an equivalent AND. This allows turning it into
1781 // a TEST instruction later.
1782 if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
1783 Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
1784 !hasNonFlagsUse(Op)) {
1785 EVT VT = Op.getValueType();
1786 unsigned BitWidth = VT.getSizeInBits();
1787 unsigned ShAmt = Op->getConstantOperandVal(1);
1788 if (ShAmt >= BitWidth) // Avoid undefined shifts.
1789 break;
1790 APInt Mask = ArithOp.getOpcode() == ISD::SRL
1792 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
1793 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
1794 break;
1795 Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
1796 DAG.getConstant(Mask, DL, VT));
1797 }
1798 break;
1799
1800 case ISD::AND:
1801 // If the primary 'and' result isn't used, don't bother using
1802 // M68kISD::AND, because a TEST instruction will be better.
1803 if (!hasNonFlagsUse(Op)) {
1804 SDValue Op0 = ArithOp->getOperand(0);
1805 SDValue Op1 = ArithOp->getOperand(1);
1806 EVT VT = ArithOp.getValueType();
1807 bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
1808 bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
1809
1810 // But if we can combine this into an ANDN operation, then create an AND
1811 // now and allow it to be pattern matched into an ANDN.
1812 if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
1813 break;
1814 }
1815 [[fallthrough]];
1816 case ISD::SUB:
1817 case ISD::OR:
1818 case ISD::XOR:
1819 // Due to the ISEL shortcoming noted above, be conservative if this op is
1820 // likely to be selected as part of a load-modify-store instruction.
1821 for (const auto *U : Op.getNode()->uses())
1822 if (U->getOpcode() == ISD::STORE)
1823 goto default_case;
1824
1825 // Otherwise use a regular CCR-setting instruction.
1826 switch (ArithOp.getOpcode()) {
1827 default:
1828 llvm_unreachable("unexpected operator!");
1829 case ISD::SUB:
1830 Opcode = M68kISD::SUB;
1831 break;
1832 case ISD::XOR:
1833 Opcode = M68kISD::XOR;
1834 break;
1835 case ISD::AND:
1836 Opcode = M68kISD::AND;
1837 break;
1838 case ISD::OR:
1839 Opcode = M68kISD::OR;
1840 break;
1841 }
1842
1843 NumOperands = 2;
1844 break;
1845 case M68kISD::ADD:
1846 case M68kISD::SUB:
1847 case M68kISD::OR:
1848 case M68kISD::XOR:
1849 case M68kISD::AND:
1850 return SDValue(Op.getNode(), 1);
1851 default:
1852 default_case:
1853 break;
1854 }
1855
1856 // If we found that truncation is beneficial, perform the truncation and
1857 // update 'Op'.
1858 if (NeedTruncation) {
1859 EVT VT = Op.getValueType();
1860 SDValue WideVal = Op->getOperand(0);
1861 EVT WideVT = WideVal.getValueType();
1862 unsigned ConvertedOp = 0;
1863 // Use a target machine opcode to prevent further DAGCombine
1864 // optimizations that may separate the arithmetic operations
1865 // from the setcc node.
1866 switch (WideVal.getOpcode()) {
1867 default:
1868 break;
1869 case ISD::ADD:
1870 ConvertedOp = M68kISD::ADD;
1871 break;
1872 case ISD::SUB:
1873 ConvertedOp = M68kISD::SUB;
1874 break;
1875 case ISD::AND:
1876 ConvertedOp = M68kISD::AND;
1877 break;
1878 case ISD::OR:
1879 ConvertedOp = M68kISD::OR;
1880 break;
1881 case ISD::XOR:
1882 ConvertedOp = M68kISD::XOR;
1883 break;
1884 }
1885
1886 if (ConvertedOp) {
1887 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1888 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
1889 SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
1890 SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
1891 Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
1892 }
1893 }
1894 }
1895
1896 if (Opcode == 0) {
1897 // Emit a CMP with 0, which is the TEST pattern.
1898 return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
1899 DAG.getConstant(0, DL, Op.getValueType()), Op);
1900 }
1901 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
1902 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
1903
1904 SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
1905 DAG.ReplaceAllUsesWith(Op, New);
1906 return SDValue(New.getNode(), 1);
1907}
1908
1909/// \brief Return true if the condition is an unsigned comparison operation.
1910static bool isM68kCCUnsigned(unsigned M68kCC) {
1911 switch (M68kCC) {
1912 default:
1913 llvm_unreachable("Invalid integer condition!");
1914 case M68k::COND_EQ:
1915 case M68k::COND_NE:
1916 case M68k::COND_CS:
1917 case M68k::COND_HI:
1918 case M68k::COND_LS:
1919 case M68k::COND_CC:
1920 return true;
1921 case M68k::COND_GT:
1922 case M68k::COND_GE:
1923 case M68k::COND_LT:
1924 case M68k::COND_LE:
1925 return false;
1926 }
1927}
1928
1929SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
1930 const SDLoc &DL, SelectionDAG &DAG) const {
1931 if (isNullConstant(Op1))
1932 return EmitTest(Op0, M68kCC, DL, DAG);
1933
1934 assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
1935 "Unexpected comparison operation for MVT::i1 operands");
1936
1937 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
1938 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
1939 // Only promote the compare up to I32 if it is a 16 bit operation
1940 // with an immediate. 16 bit immediates are to be avoided.
1941 if ((Op0.getValueType() == MVT::i16 &&
1942 (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
1944 unsigned ExtendOp =
1946 Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
1947 Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
1948 }
1949 // Use SUB instead of CMP to enable CSE between SUB and CMP.
1950 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
1951 SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
1952 return SDValue(Sub.getNode(), 1);
1953 }
1954 return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
1955}
1956
1957/// Result of 'and' or 'trunc to i1' is compared against zero.
1958/// Change to a BTST node if possible.
1959SDValue M68kTargetLowering::LowerToBTST(SDValue Op, ISD::CondCode CC,
1960 const SDLoc &DL,
1961 SelectionDAG &DAG) const {
1962 if (Op.getOpcode() == ISD::AND)
1963 return LowerAndToBTST(Op, CC, DL, DAG);
1964 if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
1965 return LowerTruncateToBTST(Op, CC, DL, DAG);
1966 return SDValue();
1967}
1968
/// Custom-lower ISD::SETCC: emit a flag-setting compare and materialize the
/// result with M68kISD::SETCC, folding bit tests and redundant setcc chains
/// along the way.  Returns an empty SDValue when the condition code cannot
/// be translated.
SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BTST if possible.
  // Lower (X & (1 << N)) == 0 to BTST(X, N).
  // Lower ((X >>u N) & 1) != 0 to BTST(X, N).
  // Lower ((X >>s N) & 1) != 0 to BTST(X, N).
  // Lower (trunc (X >> N) to i1) to BTST(X, N).
  if (Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue NewSetCC = LowerToBTST(Op0, CC, DL, DAG)) {
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == M68kISD::SETCC) {
      // NOTE(review): the declaration of CCode (the condition taken from
      // Op0's first operand) is elided in this excerpt.
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      if (!Invert)
        return Op0;

      CCode = M68k::GetOppositeBranchCondition(CCode);
      SDValue SetCC =
          DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                      DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
      return SetCC;
    }
  }
  if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (isOneConstant(Op1)) {
      // NOTE(review): the computation of NewCC (presumably the inverted
      // condition code) is elided in this excerpt.
      return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
    }
    if (!isNullConstant(Op1)) {
      // Compare an i1 against a non-constant RHS by xoring and testing the
      // result against zero.
      SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
      return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
    }
  }

  // General case: translate the ISD condition code, emit the compare, and
  // wrap its flags result in a SETCC node.
  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
  unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
  if (M68kCC == M68k::COND_INVALID)
    return SDValue();

  SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
}
2034
2035SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
2036 SelectionDAG &DAG) const {
2037 SDValue LHS = Op.getOperand(0);
2038 SDValue RHS = Op.getOperand(1);
2039 SDValue Carry = Op.getOperand(2);
2040 SDValue Cond = Op.getOperand(3);
2041 SDLoc DL(Op);
2042
2043 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
2044 M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());
2045
2046 EVT CarryVT = Carry.getValueType();
2047 APInt NegOne = APInt::getAllOnes(CarryVT.getScalarSizeInBits());
2048 Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
2049 DAG.getConstant(NegOne, DL, CarryVT));
2050
2051 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
2052 SDValue Cmp =
2053 DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));
2054
2055 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2056 DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
2057}
2058
2059/// Return true if opcode is a M68k logical comparison.
2060static bool isM68kLogicalCmp(SDValue Op) {
2061 unsigned Opc = Op.getNode()->getOpcode();
2062 if (Opc == M68kISD::CMP)
2063 return true;
2064 if (Op.getResNo() == 1 &&
2065 (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX ||
2066 Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL ||
2067 Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND))
2068 return true;
2069
2070 if (Op.getResNo() == 2 && Opc == M68kISD::UMUL)
2071 return true;
2072
2073 return false;
2074}
2075
  // NOTE(review): this is the body of a static helper whose signature line is
  // elided in this excerpt; V is the candidate value and DAG the current DAG.
  // It answers: is V a truncate that provably drops only zero bits?
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  // The truncate is lossless iff every bit above the narrow width of the
  // wide operand is known to be zero.
  return DAG.MaskedValueIsZero(VOp0,
                               APInt::getHighBitsSet(InBits, InBits - Bits));
}
2086
/// Custom-lower ISD::SELECT into M68kISD::CMOV, reusing existing condition
/// flags where possible and applying several carry/sign-bit based folds for
/// selects between 0/-1 and another value.
SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  // Tracks whether we still need to materialize the condition with an
  // explicit test instruction.
  bool addTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;

  // Fold a SETCC condition into the flag-based M68k form first.
  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG))
      Cond = NewCond;
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  if (Cond.getOpcode() == M68kISD::SETCC &&
      Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(0))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode =
        cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
      // Y is whichever select arm is not the all-ones constant.
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(1);
      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {

        SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);

        // 0 - x produces an unsigned borrow/carry exactly when x != 0.
        SDValue Neg =
            DAG.getNode(M68kISD::SUB, DL, VTs,
                        DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);

        // SETCC_CARRY broadcasts the carry flag across the whole result.
        SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                                  DAG.getConstant(M68k::COND_CS, DL, MVT::i8),
                                  SDValue(Neg.getNode(), 1));
        return Res;
      }

      // Recompute the flags as CMP(1, x) and broadcast the carry.
      Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                        DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);

      SDValue Res = // Res = 0 or -1.
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);

      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      // OR in Y unless it is known to be zero.
      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    bool IllegalFPCMov = false;

    if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      addTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    // The condition is itself an overflow-producing arithmetic node: lower it
    // to the matching flag-producing M68k node and select on the right flag.
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned MxOpcode;
    unsigned MxCond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_VS;
      break;
    case ISD::USUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SSUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_VS;
      break;
    case ISD::UMULO:
      MxOpcode = M68kISD::UMUL;
      MxCond = M68k::COND_VS;
      break;
    case ISD::SMULO:
      MxOpcode = M68kISD::SMUL;
      MxCond = M68k::COND_VS;
      break;
    default:
      llvm_unreachable("unexpected overflowing operator");
    }
    // UMUL carries an extra result value, so its flags land in value #2.
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = MxOp.getValue(2);
    else
      Cond = MxOp.getValue(1);

    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    // NOTE(review): the guarding if-condition on the next statement is elided
    // in this excerpt (presumably checking that the truncate drops only
    // known-zero bits).
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    // Nothing reusable found: emit an explicit test against zero.
    CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
  }

  // a < b ? -1 : 0 -> RES = ~setcc_carry
  // a < b ? 0 : -1 -> RES = setcc_carry
  // a >= b ? -1 : 0 -> RES = setcc_carry
  // a >= b ? 0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == M68kISD::SUB) {
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond);
      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // M68k doesn't have an i8 cmov. If both operands are the result of a
  // truncate widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
      Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Block CopyFromReg so partial register stalls are avoided.
        T1.getOpcode() != ISD::CopyFromReg &&
        T2.getOpcode() != ISD::CopyFromReg) {
      SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
      SDValue Cmov = DAG.getNode(M68kISD::CMOV, DL, VTs, T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {Op2, Op1, CC, Cond};
  return DAG.getNode(M68kISD::CMOV, DL, VTs, Ops);
}
2286
2287/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
2288/// each of which has no other use apart from the AND / OR.
2289static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
2290 Opc = Op.getOpcode();
2291 if (Opc != ISD::OR && Opc != ISD::AND)
2292 return false;
2293 return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) &&
2294 Op.getOperand(0).hasOneUse() &&
2295 M68k::IsSETCC(Op.getOperand(1).getOpcode()) &&
2296 Op.getOperand(1).hasOneUse());
2297}
2298
2299/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
2300/// SETCC node has a single use.
2301static bool isXor1OfSetCC(SDValue Op) {
2302 if (Op.getOpcode() != ISD::XOR)
2303 return false;
2304 if (isOneConstant(Op.getOperand(1)))
2305 return Op.getOperand(0).getOpcode() == M68kISD::SETCC &&
2306 Op.getOperand(0).hasOneUse();
2307 return false;
2308}
2309
/// Custom-lower ISD::BRCOND into M68kISD::BRCOND, reusing existing condition
/// flags when possible and splitting and/or-of-setcc conditions into two
/// branches.
SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  // Tracks whether an explicit flag-setting test must still be emitted.
  bool AddTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub}o == 0).
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
      // Branching on "no overflow": strip the compare and remember to invert.
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      if (SDValue NewCond = LowerSETCC(Cond, DAG))
        Cond = NewCond;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      AddTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default:
        break;
      case M68k::COND_VS:
      case M68k::COND_CS:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        AddTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned MxOpcode;
    unsigned MxCond;
    SDVTList VTs;
    // Keep this in sync with LowerXALUO, otherwise we might create redundant
    // instructions that can't be removed afterwards (i.e. M68kISD::ADD and
    // M68kISD::INC).
    switch (CondOpcode) {
    case ISD::UADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_VS;
      break;
    case ISD::USUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SSUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_VS;
      break;
    case ISD::UMULO:
      MxOpcode = M68kISD::UMUL;
      MxCond = M68k::COND_VS;
      break;
    case ISD::SMULO:
      MxOpcode = M68kISD::SMUL;
      MxCond = M68k::COND_VS;
      break;
    default:
      llvm_unreachable("unexpected overflowing operator");
    }

    // NOTE(review): the statement conditioned on Inverted is elided in this
    // excerpt (presumably flipping MxCond to its opposite condition).
    if (Inverted)

    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i8);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i8);

    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);

    // UMUL exposes its flags in result #2, the others in result #1.
    if (CondOpcode == ISD::UMULO)
      Cond = MxOp.getValue(2);
    else
      Cond = MxOp.getValue(1);

    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    AddTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                              Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          AddTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          M68k::CondCode CCode =
              (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = M68k::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, DL, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
                DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                                Dest, CC, Cmp);
            M68k::CondCode CCode =
                (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = M68k::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, DL, MVT::i8);
            Cond = Cmp;
            AddTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
      // It should be transformed during dag combiner except when the condition
      // is set by a arithmetics with overflow node.
      M68k::CondCode CCode =
          (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = M68k::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, DL, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      AddTest = false;
    }
  }

  if (AddTest) {
    // Look past the truncate if the high bits are known zero.
    // NOTE(review): the guarding if-condition on the next statement is elided
    // in this excerpt.
      Cond = Cond.getOperand(0);

    // We know the result is compared against zero. Try to match it to BT.
    if (Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    // Fall back to an explicit test against zero; use the inverted condition
    // for the "overflow == 0" pattern recognized above.
    M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    Cond = EmitTest(Cond, MxCond, DL, DAG);
  }
  return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
                     Cond);
}
2512
2513SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
2514 SelectionDAG &DAG) const {
2515 MVT VT = Op.getNode()->getSimpleValueType(0);
2516
2517 // Let legalize expand this if it isn't a legal type yet.
2518 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2519 return SDValue();
2520
2521 SDVTList VTs = DAG.getVTList(VT, MVT::i8);
2522
2523 unsigned Opc;
2524 bool ExtraOp = false;
2525 switch (Op.getOpcode()) {
2526 default:
2527 llvm_unreachable("Invalid code");
2528 case ISD::ADDC:
2529 Opc = M68kISD::ADD;
2530 break;
2531 case ISD::ADDE:
2532 Opc = M68kISD::ADDX;
2533 ExtraOp = true;
2534 break;
2535 case ISD::SUBC:
2536 Opc = M68kISD::SUB;
2537 break;
2538 case ISD::SUBE:
2539 Opc = M68kISD::SUBX;
2540 ExtraOp = true;
2541 break;
2542 }
2543
2544 if (!ExtraOp)
2545 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2546 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2547 Op.getOperand(2));
2548}
2549
2550// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the M68kISD::Wrapper node. Suppose N is
2552// one of the above mentioned nodes. It has to be wrapped because otherwise
2553// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2554// be used to form addressing mode. These wrapped nodes will be selected
2555// into MOV32ri.
/// Lower an ISD::ConstantPool node to its target form wrapped in a
/// Wrapper/WrapperPC node, adding the PIC global base when required.
SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  // PC-relative references use the PC-based wrapper node.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  MVT PtrVT = getPointerTy(DAG.getDataLayout());
  // NOTE(review): the line declaring Result and opening the call that builds
  // the target constant-pool node is elided in this excerpt; only its
  // argument list is visible.
      CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);

  SDLoc DL(CP);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  // NOTE(review): the guarding if-condition here is elided in this excerpt.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}
2585
/// Lower an ISD::ExternalSymbol node: wrap the target symbol, add the PIC
/// base when required, and load through a stub when the reference needs one.
SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
                                                SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  // NOTE(review): the declaration of Mod (the current IR module) is elided in
  // this excerpt.
  unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);

  // PC-relative references use the PC-based wrapper node.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  // NOTE(review): the guarding if-condition here is elided in this excerpt.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlag)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
    // NOTE(review): the trailing MachinePointerInfo argument of this load is
    // elided in this excerpt.
  }

  return Result;
}
2622
2623SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
2624 SelectionDAG &DAG) const {
2625 unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
2626 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2627 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
2628 SDLoc DL(Op);
2629 auto PtrVT = getPointerTy(DAG.getDataLayout());
2630
2631 // Create the TargetBlockAddressAddress node.
2632 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
2633
2634 if (M68kII::isPCRelBlockReference(OpFlags)) {
2635 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2636 } else {
2637 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2638 }
2639
2640 // With PIC, the address is actually $g + Offset.
2641 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2642 Result =
2643 DAG.getNode(ISD::ADD, DL, PtrVT,
2644 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2645 }
2646
2647 return Result;
2648}
2649
/// Worker for global-address lowering: build the target global-address node
/// (folding the offset when the reference is direct), wrap it, apply the PIC
/// base and stub load when required, then re-add any unfolded offset.
SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
                                               const SDLoc &DL, int64_t Offset,
                                               SelectionDAG &DAG) const {
  unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  // NOTE(review): the declaration of Result is elided in this excerpt.
  if (M68kII::isDirectGlobalReference(OpFlags)) {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  }

  // PC-relative references get the PC-based wrapper.
  if (M68kII::isPCRelGlobalReference(OpFlags))
    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
  else
    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlags)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
    // NOTE(review): the trailing MachinePointerInfo argument of this load is
    // elided in this excerpt.
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));
  }

  return Result;
}
2694
2695SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
2696 SelectionDAG &DAG) const {
2697 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2698 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
2699 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
2700}
2701
2702//===----------------------------------------------------------------------===//
2703// Custom Lower Jump Table
2704//===----------------------------------------------------------------------===//
2705
/// Lower an ISD::JumpTable node to its target form wrapped in a
/// Wrapper/WrapperPC node, adding the PIC global base when required.
SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
                                           SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  // PC-relative references use the PC-based wrapper node.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  // NOTE(review): the guarding if-condition here is elided in this excerpt.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}
2733
  return Subtarget.getJumpTableEncoding(); // NOTE(review): signature line of
                                           // getJumpTableEncoding is elided in
                                           // this excerpt; the choice is
                                           // delegated to the subtarget.
}
2737
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {
  // NOTE(review): the first line of this method's signature and the start of
  // the returned expression are elided in this excerpt; only the trailing
  // Ctx argument of the return expression is visible.
                                  Ctx);
}
2744
                                                  SelectionDAG &DAG) const {
  // NOTE(review): the lines declaring/computing Table are elided in this
  // excerpt.

  // MachineJumpTableInfo::EK_LabelDifference32 entry
  return Table;
}
2754
// NOTE This only used for MachineJumpTableInfo::EK_LabelDifference32 entries.
// NOTE(review): the first line of this method's signature is elided in this
// excerpt.
    const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
  // Use the jump table's own symbol as the relocation base expression.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}
2760
  // NOTE(review): the signature of getConstraintType is elided in this
  // excerpt; this classifies M68k inline-asm constraint strings.
  if (Constraint.size() > 0) {
    switch (Constraint[0]) {
    // 'a' (address register) and 'd' (data register) name register classes.
    case 'a':
    case 'd':
      return C_RegisterClass;
    // 'I'..'P' are immediate-range constraints; the exact ranges are
    // validated in LowerAsmOperandForConstraint.
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Immediate;
    // Two-letter 'C0'/'Ci'/'Cj' constraints are immediates as well.
    case 'C':
      if (Constraint.size() == 2)
        switch (Constraint[1]) {
        case '0':
        case 'i':
        case 'j':
          return C_Immediate;
        default:
          break;
        }
      break;
    // 'Q' and 'U' are memory constraints.
    case 'Q':
    case 'U':
      return C_Memory;
    default:
      break;
    }
  }

  // Anything unrecognized falls back to the generic classifier.
  return TargetLowering::getConstraintType(Constraint);
}
2798
                                                  std::string &Constraint,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {
  // NOTE(review): the first line of this method's signature
  // (LowerAsmOperandForConstraint's name and SDValue Op parameter) is elided
  // in this excerpt. The method validates constant operands against the
  // per-letter ranges below and pushes a target constant on success;
  // anything else is deferred to the base class.
  SDValue Result;

  if (Constraint.size() == 1) {
    // Constant constraints
    switch (Constraint[0]) {
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P': {
      // All of these accept only constant integer operands.
      auto *C = dyn_cast<ConstantSDNode>(Op);
      if (!C)
        return;

      int64_t Val = C->getSExtValue();
      switch (Constraint[0]) {
      case 'I': // constant integer in the range [1,8]
        if (Val > 0 && Val <= 8)
          break;
        return;
      case 'J': // constant signed 16-bit integer
        if (isInt<16>(Val))
          break;
        return;
      case 'K': // constant that is NOT in the range of [-0x80, 0x80)
        if (Val < -0x80 || Val >= 0x80)
          break;
        return;
      case 'L': // constant integer in the range [-8,-1]
        if (Val < 0 && Val >= -8)
          break;
        return;
      case 'M': // constant that is NOT in the range of [-0x100, 0x100)
        if (Val < -0x100 || Val >= 0x100)
          break;
        return;
      case 'N': // constant integer in the range [24,31]
        if (Val >= 24 && Val <= 31)
          break;
        return;
      case 'O': // constant integer 16
        if (Val == 16)
          break;
        return;
      case 'P': // constant integer in the range [8,15]
        if (Val >= 8 && Val <= 15)
          break;
        return;
      default:
        llvm_unreachable("Unhandled constant constraint");
      }

      // The value passed its range check; emit it as a target constant.
      Result = DAG.getTargetConstant(Val, SDLoc(Op), Op.getValueType());
      break;
    }
    default:
      break;
    }
  }

  if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    case 'C':
      // Constant constraints start with 'C'
      switch (Constraint[1]) {
      case '0':
      case 'i':
      case 'j': {
        auto *C = dyn_cast<ConstantSDNode>(Op);
        if (!C)
          break;

        int64_t Val = C->getSExtValue();
        switch (Constraint[1]) {
        case '0': // constant integer 0
          if (!Val)
            break;
          return;
        case 'i': // constant integer
          break;
        case 'j': // integer constant that doesn't fit in 16 bits
          if (!isInt<16>(C->getSExtValue()))
            break;
          return;
        default:
          llvm_unreachable("Unhandled constant constraint");
        }

        Result = DAG.getTargetConstant(Val, SDLoc(Op), Op.getValueType());
        break;
      }
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Defer any other constraint kind to the generic implementation.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
2913
std::pair<unsigned, const TargetRegisterClass *>
// NOTE(review): the line naming this method and its first parameter is
// elided in this excerpt (getRegForInlineAsmConstraint's TRI parameter).
                                                 StringRef Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // 'r' and 'd' map to the data-register class of the requested width.
    case 'r':
    case 'd':
      switch (VT.SimpleTy) {
      case MVT::i8:
        return std::make_pair(0U, &M68k::DR8RegClass);
      case MVT::i16:
        return std::make_pair(0U, &M68k::DR16RegClass);
      case MVT::i32:
        return std::make_pair(0U, &M68k::DR32RegClass);
      default:
        break;
      }
      break;
    // 'a' maps to the address-register class; only i16/i32 are handled here.
    case 'a':
      switch (VT.SimpleTy) {
      case MVT::i16:
        return std::make_pair(0U, &M68k::AR16RegClass);
      case MVT::i32:
        return std::make_pair(0U, &M68k::AR32RegClass);
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  // Everything unhandled falls back to the generic implementation.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
2950
/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool M68k::isCalleePop(CallingConv::ID CallingConv, bool IsVarArg,
                       bool GuaranteeTCO) {
  // As implemented here, no M68k calling convention uses callee-pop; all
  // three parameters are currently ignored.
  return false;
}
2957
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
// NOTE(review): the signature line of this static predicate is elided in
// this excerpt; MI is the candidate machine instruction.
  switch (MI.getOpcode()) {
  // Only the plain data/address CMOV pseudo forms are cascadable.
  case M68k::CMOV8d:
  case M68k::CMOV16d:
  case M68k::CMOV32r:
    return true;

  default:
    return false;
  }
}
2972
// The CCR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CCR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
// NOTE(review): the first signature lines of this static helper are elided
// in this excerpt; only the trailing TRI parameter is visible.
                                     const TargetRegisterInfo *TRI) {
  // Scan forward through BB for a use/def of CCR.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr &mi = *miI;
    // A read of CCR before any def means the flags are genuinely live past
    // the select, so no kill marker may be added.
    if (mi.readsRegister(M68k::CCR))
      return false;
    if (mi.definesRegister(M68k::CCR))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CCR is live into a
  // successor.
  if (miI == BB->end())
    for (const auto *SBB : BB->successors())
      if (SBB->isLiveIn(M68k::CCR))
        return false;

  // We found a def, or hit the end of the basic block and CCR wasn't live
  // out. SelectMI should have a kill flag on CCR.
  SelectItr->addRegisterKilled(M68k::CCR, TRI);
  return true;
}
3003
3005M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
3006 MachineBasicBlock *MBB) const {
3007 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
3008 DebugLoc DL = MI.getDebugLoc();
3009
3010 // To "insert" a SELECT_CC instruction, we actually have to insert the
3011 // diamond control-flow pattern. The incoming instruction knows the
3012 // destination vreg to set, the condition code register to branch on, the
3013 // true/false values to select between, and a branch opcode to use.
3014 const BasicBlock *BB = MBB->getBasicBlock();
3016
3017 // ThisMBB:
3018 // ...
3019 // TrueVal = ...
3020 // cmp ccX, r1, r2
3021 // bcc Copy1MBB
3022 // fallthrough --> Copy0MBB
3023 MachineBasicBlock *ThisMBB = MBB;
3025
3026 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
3027 // as described above, by inserting a MBB, and then making a PHI at the join
3028 // point to select the true and false operands of the CMOV in the PHI.
3029 //
3030 // The code also handles two different cases of multiple CMOV opcodes
3031 // in a row.
3032 //
3033 // Case 1:
3034 // In this case, there are multiple CMOVs in a row, all which are based on
3035 // the same condition setting (or the exact opposite condition setting).
3036 // In this case we can lower all the CMOVs using a single inserted MBB, and
3037 // then make a number of PHIs at the join point to model the CMOVs. The only
3038 // trickiness here, is that in a case like:
3039 //
3040 // t2 = CMOV cond1 t1, f1
3041 // t3 = CMOV cond1 t2, f2
3042 //
3043 // when rewriting this into PHIs, we have to perform some renaming on the
3044 // temps since you cannot have a PHI operand refer to a PHI result earlier
3045 // in the same block. The "simple" but wrong lowering would be:
3046 //
3047 // t2 = PHI t1(BB1), f1(BB2)
3048 // t3 = PHI t2(BB1), f2(BB2)
3049 //
3050 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
3051 // renaming is to note that on the path through BB1, t2 is really just a
3052 // copy of t1, and do that renaming, properly generating:
3053 //
3054 // t2 = PHI t1(BB1), f1(BB2)
3055 // t3 = PHI t1(BB1), f2(BB2)
3056 //
3057 // Case 2, we lower cascaded CMOVs such as
3058 //
3059 // (CMOV (CMOV F, T, cc1), T, cc2)
3060 //
3061 // to two successives branches.
3062 MachineInstr *CascadedCMOV = nullptr;
3063 MachineInstr *LastCMOV = &MI;
3064 M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
3067 std::next(MachineBasicBlock::iterator(MI));
3068
3069 // Check for case 1, where there are multiple CMOVs with the same condition
3070 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
3071 // number of jumps the most.
3072
3073 if (isCMOVPseudo(MI)) {
3074 // See if we have a string of CMOVS with the same condition.
3075 while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
3076 (NextMIIt->getOperand(3).getImm() == CC ||
3077 NextMIIt->getOperand(3).getImm() == OppCC)) {
3078 LastCMOV = &*NextMIIt;
3079 ++NextMIIt;
3080 }
3081 }
3082
3083 // This checks for case 2, but only do this if we didn't already find
3084 // case 1, as indicated by LastCMOV == MI.
3085 if (LastCMOV == &MI && NextMIIt != MBB->end() &&
3086 NextMIIt->getOpcode() == MI.getOpcode() &&
3087 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
3088 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
3089 NextMIIt->getOperand(1).isKill()) {
3090 CascadedCMOV = &*NextMIIt;
3091 }
3092
3093 MachineBasicBlock *Jcc1MBB = nullptr;
3094
3095 // If we have a cascaded CMOV, we lower it to two successive branches to
3096 // the same block. CCR is used by both, so mark it as live in the second.
3097 if (CascadedCMOV) {
3098 Jcc1MBB = F->CreateMachineBasicBlock(BB);
3099 F->insert(It, Jcc1MBB);
3100 Jcc1MBB->addLiveIn(M68k::CCR);
3101 }
3102
3103 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
3104 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
3105 F->insert(It, Copy0MBB);
3106 F->insert(It, SinkMBB);
3107
3108 // If the CCR register isn't dead in the terminator, then claim that it's
3109 // live into the sink and copy blocks.
3110 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3111
3112 MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
3113 if (!LastCCRSUser->killsRegister(M68k::CCR) &&
3114 !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
3115 Copy0MBB->addLiveIn(M68k::CCR);
3116 SinkMBB->addLiveIn(M68k::CCR);
3117 }
3118
3119 // Transfer the remainder of MBB and its successor edges to SinkMBB.
3120 SinkMBB->splice(SinkMBB->begin(), MBB,
3121 std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
3123
3124 // Add the true and fallthrough blocks as its successors.
3125 if (CascadedCMOV) {
3126 // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
3127 MBB->addSuccessor(Jcc1MBB);
3128
3129 // In that case, Jcc1MBB will itself fallthrough the Copy0MBB, and
3130 // jump to the SinkMBB.
3131 Jcc1MBB->addSuccessor(Copy0MBB);
3132 Jcc1MBB->addSuccessor(SinkMBB);
3133 } else {
3134 MBB->addSuccessor(Copy0MBB);
3135 }
3136
3137 // The true block target of the first (or only) branch is always SinkMBB.
3138 MBB->addSuccessor(SinkMBB);
3139
3140 // Create the conditional branch instruction.
3141 unsigned Opc = M68k::GetCondBranchFromCond(CC);
3142 BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);
3143
3144 if (CascadedCMOV) {
3145 unsigned Opc2 = M68k::GetCondBranchFromCond(
3146 (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
3147 BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
3148 }
3149
3150 // Copy0MBB:
3151 // %FalseValue = ...
3152 // # fallthrough to SinkMBB
3153 Copy0MBB->addSuccessor(SinkMBB);
3154
3155 // SinkMBB:
3156 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
3157 // ...
3160 std::next(MachineBasicBlock::iterator(LastCMOV));
3161 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
3164
3165 // As we are creating the PHIs, we have to be careful if there is more than
3166 // one. Later CMOVs may reference the results of earlier CMOVs, but later
3167 // PHIs have to reference the individual true/false inputs from earlier PHIs.
3168 // That also means that PHI construction must work forward from earlier to
3169 // later, and that the code must maintain a mapping from earlier PHI's
3170 // destination registers, and the registers that went into the PHI.
3171
3172 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
3173 Register DestReg = MIIt->getOperand(0).getReg();
3174 Register Op1Reg = MIIt->getOperand(1).getReg();
3175 Register Op2Reg = MIIt->getOperand(2).getReg();
3176
3177 // If this CMOV we are generating is the opposite condition from
3178 // the jump we generated, then we have to swap the operands for the
3179 // PHI that is going to be generated.
3180 if (MIIt->getOperand(3).getImm() == OppCC)
3181 std::swap(Op1Reg, Op2Reg);
3182
3183 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
3184 Op1Reg = RegRewriteTable[Op1Reg].first;
3185
3186 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
3187 Op2Reg = RegRewriteTable[Op2Reg].second;
3188
3189 MIB =
3190 BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
3191 .addReg(Op1Reg)
3192 .addMBB(Copy0MBB)
3193 .addReg(Op2Reg)
3194 .addMBB(ThisMBB);
3195
3196 // Add this PHI to the rewrite table.
3197 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
3198 }
3199
3200 // If we have a cascaded CMOV, the second Jcc provides the same incoming
3201 // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
3202 if (CascadedCMOV) {
3203 MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
3204 // Copy the PHI result to the register defined by the second CMOV.
3205 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
3206 DL, TII->get(TargetOpcode::COPY),
3207 CascadedCMOV->getOperand(0).getReg())
3208 .addReg(MI.getOperand(0).getReg());
3209 CascadedCMOV->eraseFromParent();
3210 }
3211
3212 // Now remove the CMOV(s).
3213 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
3214 (MIIt++)->eraseFromParent();
3215
3216 return SinkMBB;
3217}
3218
3220M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
3221 MachineBasicBlock *BB) const {
3222 llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
3223}
3224
3227 MachineBasicBlock *BB) const {
3228 switch (MI.getOpcode()) {
3229 default:
3230 llvm_unreachable("Unexpected instr type to insert");
3231 case M68k::CMOV8d:
3232 case M68k::CMOV16d:
3233 case M68k::CMOV32r:
3234 return EmitLoweredSelect(MI, BB);
3235 case M68k::SALLOCA:
3236 return EmitLoweredSegAlloca(MI, BB);
3237 }
3238}
3239
3240SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3242 auto PtrVT = getPointerTy(MF.getDataLayout());
3244
3245 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3246 SDLoc DL(Op);
3247
3248 // vastart just stores the address of the VarArgsFrameIndex slot into the
3249 // memory location argument.
3250 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3251 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
3252 MachinePointerInfo(SV));
3253}
3254
3255SDValue M68kTargetLowering::LowerATOMICFENCE(SDValue Op,
3256 SelectionDAG &DAG) const {
3257 // Lower to a memory barrier created from inline asm.
3258 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3259 LLVMContext &Ctx = *DAG.getContext();
3260
3263 const SDValue AsmOperands[4] = {
3264 Op.getOperand(0), // Input chain
3266 "", TLI.getProgramPointerTy(
3267 DAG.getDataLayout())), // Empty inline asm string
3268 DAG.getMDNode(MDNode::get(Ctx, {})), // (empty) srcloc
3269 DAG.getTargetConstant(Flags, SDLoc(Op),
3270 TLI.getPointerTy(DAG.getDataLayout())), // Flags
3271 };
3272
3273 return DAG.getNode(ISD::INLINEASM, SDLoc(Op),
3274 DAG.getVTList(MVT::Other, MVT::Glue), AsmOperands);
3275}
3276
3277// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
3278// Calls to _alloca are needed to probe the stack when allocating more than 4k
3279// bytes in one go. Touching the stack at 4K increments is necessary to ensure
3280// that the guard pages used by the OS virtual memory manager are allocated in
3281// correct sequence.
3282SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3283 SelectionDAG &DAG) const {
3285 bool SplitStack = MF.shouldSplitStack();
3286
3287 SDLoc DL(Op);
3288
3289 // Get the inputs.
3290 SDNode *Node = Op.getNode();
3291 SDValue Chain = Op.getOperand(0);
3292 SDValue Size = Op.getOperand(1);
3293 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3294 EVT VT = Node->getValueType(0);
3295
3296 // Chain the dynamic stack allocation so that it doesn't modify the stack
3297 // pointer when other instructions are using the stack.
3298 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
3299
3301 if (SplitStack) {
3302 auto &MRI = MF.getRegInfo();
3303 auto SPTy = getPointerTy(DAG.getDataLayout());
3304 auto *ARClass = getRegClassFor(SPTy);
3305 Register Vreg = MRI.createVirtualRegister(ARClass);
3306 Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
3307 Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
3308 DAG.getRegister(Vreg, SPTy));
3309 } else {
3310 auto &TLI = DAG.getTargetLoweringInfo();
3312 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
3313 " not tell us which reg is the stack pointer!");
3314
3315 SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
3316 Chain = SP.getValue(1);
3317 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3318 unsigned StackAlign = TFI.getStackAlignment();
3319 Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
3320 if (Align > StackAlign)
3321 Result = DAG.getNode(ISD::AND, DL, VT, Result,
3322 DAG.getConstant(-(uint64_t)Align, DL, VT));
3323 Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
3324 }
3325
3326 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);
3327
3328 SDValue Ops[2] = {Result, Chain};
3329 return DAG.getMergeValues(Ops, DL);
3330}
3331
3332SDValue M68kTargetLowering::LowerShiftLeftParts(SDValue Op,
3333 SelectionDAG &DAG) const {
3334 SDLoc DL(Op);
3335 SDValue Lo = Op.getOperand(0);
3336 SDValue Hi = Op.getOperand(1);
3337 SDValue Shamt = Op.getOperand(2);
3338 EVT VT = Lo.getValueType();
3339
3340 // if Shamt - register size < 0: // Shamt < register size
3341 // Lo = Lo << Shamt
3342 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (register size - 1 ^ Shamt))
3343 // else:
3344 // Lo = 0
3345 // Hi = Lo << (Shamt - register size)
3346
3347 SDValue Zero = DAG.getConstant(0, DL, VT);
3348 SDValue One = DAG.getConstant(1, DL, VT);
3349 SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
3350 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3351 SDValue ShamtMinusRegisterSize =
3352 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3353 SDValue RegisterSizeMinus1Shamt =
3354 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3355
3356 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3357 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3358 SDValue ShiftRightLo =
3359 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, RegisterSizeMinus1Shamt);
3360 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3361 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3362 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize);
3363
3364 SDValue CC =
3365 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3366
3367 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3368 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3369
3370 return DAG.getMergeValues({Lo, Hi}, DL);
3371}
3372
3373SDValue M68kTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3374 bool IsSRA) const {
3375 SDLoc DL(Op);
3376 SDValue Lo = Op.getOperand(0);
3377 SDValue Hi = Op.getOperand(1);
3378 SDValue Shamt = Op.getOperand(2);
3379 EVT VT = Lo.getValueType();
3380
3381 // SRA expansion:
3382 // if Shamt - register size < 0: // Shamt < register size
3383 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3384 // Hi = Hi >>s Shamt
3385 // else:
3386 // Lo = Hi >>s (Shamt - register size);
3387 // Hi = Hi >>s (register size - 1)
3388 //
3389 // SRL expansion:
3390 // if Shamt - register size < 0: // Shamt < register size
3391 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3392 // Hi = Hi >>u Shamt
3393 // else:
3394 // Lo = Hi >>u (Shamt - register size);
3395 // Hi = 0;
3396
3397 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3398
3399 SDValue Zero = DAG.getConstant(0, DL, VT);
3400 SDValue One = DAG.getConstant(1, DL, VT);
3401 SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
3402 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3403 SDValue ShamtMinusRegisterSize =
3404 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3405 SDValue RegisterSizeMinus1Shamt =
3406 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3407
3408 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3409 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3410 SDValue ShiftLeftHi =
3411 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, RegisterSizeMinus1Shamt);
3412 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3413 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3414 SDValue LoFalse =
3415 DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize);
3416 SDValue HiFalse =
3417 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, RegisterSizeMinus1) : Zero;
3418
3419 SDValue CC =
3420 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3421
3422 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3423 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3424
3425 return DAG.getMergeValues({Lo, Hi}, DL);
3426}
3427
3428//===----------------------------------------------------------------------===//
3429// DAG Combine
3430//===----------------------------------------------------------------------===//
3431
3433 SelectionDAG &DAG) {
3434 return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
3435 DAG.getConstant(Cond, dl, MVT::i8), CCR);
3436}
3437// When legalizing carry, we create carries via add X, -1
3438// If that comes from an actual carry, via setcc, we use the
3439// carry directly.
3441 if (CCR.getOpcode() == M68kISD::ADD) {
3442 if (isAllOnesConstant(CCR.getOperand(1))) {
3443 SDValue Carry = CCR.getOperand(0);
3444 while (Carry.getOpcode() == ISD::TRUNCATE ||
3445 Carry.getOpcode() == ISD::ZERO_EXTEND ||
3446 Carry.getOpcode() == ISD::SIGN_EXTEND ||
3447 Carry.getOpcode() == ISD::ANY_EXTEND ||
3448 (Carry.getOpcode() == ISD::AND &&
3449 isOneConstant(Carry.getOperand(1))))
3450 Carry = Carry.getOperand(0);
3451 if (Carry.getOpcode() == M68kISD::SETCC ||
3452 Carry.getOpcode() == M68kISD::SETCC_CARRY) {
3453 if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
3454 return Carry.getOperand(1);
3455 }
3456 }
3457 }
3458
3459 return SDValue();
3460}
3461
3462/// Optimize a CCR definition used according to the condition code \p CC into
3463/// a simpler CCR value, potentially returning a new \p CC and replacing uses
3464/// of chain values.
3466 SelectionDAG &DAG,
3467 const M68kSubtarget &Subtarget) {
3468 if (CC == M68k::COND_CS)
3470 return Flags;
3471
3472 return SDValue();
3473}
3474
3475// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
3477 const M68kSubtarget &Subtarget) {
3478 SDLoc DL(N);
3479 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
3480 SDValue CCR = N->getOperand(1);
3481
3482 // Try to simplify the CCR and condition code operands.
3483 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
3484 return getSETCC(CC, Flags, DL, DAG);
3485
3486 return SDValue();
3487}
3489 const M68kSubtarget &Subtarget) {
3490 SDLoc DL(N);
3491 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
3492 SDValue CCR = N->getOperand(3);
3493
3494 // Try to simplify the CCR and condition code operands.
3495 // Make sure to not keep references to operands, as combineSetCCCCR can
3496 // RAUW them under us.
3497 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
3498 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
3499 return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
3500 N->getOperand(1), Cond, Flags);
3501 }
3502
3503 return SDValue();
3504}
3505
3507 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3508 MVT VT = N->getSimpleValueType(0);
3509 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3510 return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
3511 N->getOperand(1), Flags);
3512 }
3513
3514 return SDValue();
3515}
3516
3517// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
3520 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3521 MVT VT = N->getSimpleValueType(0);
3522 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3523 return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
3524 N->getOperand(1), Flags);
3525 }
3526
3527 return SDValue();
3528}
3529
3530SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
3531 DAGCombinerInfo &DCI) const {
3532 SelectionDAG &DAG = DCI.DAG;
3533 switch (N->getOpcode()) {
3534 case M68kISD::SUBX:
3535 return combineSUBX(N, DAG);
3536 case M68kISD::ADDX:
3537 return combineADDX(N, DAG, DCI);
3538 case M68kISD::SETCC:
3539 return combineM68kSetCC(N, DAG, Subtarget);
3540 case M68kISD::BRCOND:
3541 return combineM68kBrCond(N, DAG, Subtarget);
3542 }
3543
3544 return SDValue();
3545}
3546
3547//===----------------------------------------------------------------------===//
3548// M68kISD Node Names
3549//===----------------------------------------------------------------------===//
3550const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
3551 switch (Opcode) {
3552 case M68kISD::CALL:
3553 return "M68kISD::CALL";
3554 case M68kISD::TAIL_CALL:
3555 return "M68kISD::TAIL_CALL";
3556 case M68kISD::RET:
3557 return "M68kISD::RET";
3558 case M68kISD::TC_RETURN:
3559 return "M68kISD::TC_RETURN";
3560 case M68kISD::ADD:
3561 return "M68kISD::ADD";
3562 case M68kISD::SUB:
3563 return "M68kISD::SUB";
3564 case M68kISD::ADDX:
3565 return "M68kISD::ADDX";
3566 case M68kISD::SUBX:
3567 return "M68kISD::SUBX";
3568 case M68kISD::SMUL:
3569 return "M68kISD::SMUL";
3570 case M68kISD::UMUL:
3571 return "M68kISD::UMUL";
3572 case M68kISD::OR:
3573 return "M68kISD::OR";
3574 case M68kISD::XOR:
3575 return "M68kISD::XOR";
3576 case M68kISD::AND:
3577 return "M68kISD::AND";
3578 case M68kISD::CMP:
3579 return "M68kISD::CMP";
3580 case M68kISD::BTST:
3581 return "M68kISD::BTST";
3582 case M68kISD::SELECT:
3583 return "M68kISD::SELECT";
3584 case M68kISD::CMOV:
3585 return "M68kISD::CMOV";
3586 case M68kISD::BRCOND:
3587 return "M68kISD::BRCOND";
3588 case M68kISD::SETCC:
3589 return "M68kISD::SETCC";
3591 return "M68kISD::SETCC_CARRY";
3593 return "M68kISD::GLOBAL_BASE_REG";
3594 case M68kISD::Wrapper:
3595 return "M68kISD::Wrapper";
3596 case M68kISD::WrapperPC:
3597 return "M68kISD::WrapperPC";
3599 return "M68kISD::SEG_ALLOCA";
3600 default:
3601 return NULL;
3602 }
3603}
3604
3606 bool IsVarArg) const {
3607 if (Return)
3608 return RetCC_M68k_C;
3609 else
3610 return CC_M68k_C;
3611}
unsigned const MachineRegisterInfo * MRI
static SDValue getSETCC(AArch64CC::CondCode CC, SDValue NZCV, const SDLoc &DL, SelectionDAG &DAG)
Helper function to create 'CSET', which is equivalent to 'CSINC <Wd>, WZR, WZR, invert(<cond>)'.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
SmallVector< MachineOperand, 4 > Cond
return RetTy
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:463
const HexagonInstrInfo * TII
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
This file contains the custom routines for the M68k Calling Convention that aren't done by tablegen.
static SDValue LowerTruncateToBTST(SDValue Op, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
static SDValue combineADDX(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc)
Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes each of which has no other use...
static bool hasNonFlagsUse(SDValue Op)
return true if Op has a use that doesn't just read flags.
static bool isM68kCCUnsigned(unsigned M68kCC)
Return true if the condition is an unsigned comparison operation.
static StructReturnType callIsStructReturn(const SmallVectorImpl< ISD::OutputArg > &Outs)
static bool isXor1OfSetCC(SDValue Op)
Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the SETCC node has a single use...
static SDValue LowerAndToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Result of 'and' is compared against zero. Change to a BTST node if possible.
static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode)
static StructReturnType argsAreStructReturn(const SmallVectorImpl< ISD::InputArg > &Ins)
Determines whether a function uses struct return semantics.
static bool isCMOVPseudo(MachineInstr &MI)
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)
Return true if the function is being made into a tailcall target by changing its ABI.
static bool isM68kLogicalCmp(SDValue Op)
Return true if opcode is a M68k logical comparison.
static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
Optimize a CCR definition used according to the condition code CC into a simpler CCR value,...
static SDValue combineCarryThroughADD(SDValue CCR)
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Create a BTST (Bit Test) node - Test bit BitNo in Src and set condition according to equal/not-equal ...
StructReturnType
@ NotStructReturn
@ RegStructReturn
@ StackStructReturn
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG)
static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG)
static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL, bool IsFP, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG)
Do a one-to-one translation of a ISD::CondCode to the M68k-specific condition code,...
This file defines the interfaces that M68k uses to lower LLVM code into a selection DAG.
This file declares the M68k specific subclass of MachineFunctionInfo.
This file declares the M68k specific subclass of TargetSubtargetInfo.
This file declares the M68k specific subclass of TargetMachine.
This file contains declarations for M68k ELF object file lowering.
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
typename CallsiteContextGraph< DerivedCCG, FuncTy, CallTy >::FuncInfo FuncInfo
#define T1
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
@ Flags
Definition: TextStubV5.cpp:93
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:75
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:214
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:718
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
The address of a basic block.
Definition: Constants.h:874
CCState - This class holds information needed while lowering arguments and return values.
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
unsigned getValNo() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This is an important base class in LLVM.
Definition: Constant.h:41
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
iterator_range< arg_iterator > args()
Definition: Function.h:804
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:670
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:646
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:237
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:630
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:274
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setBytesToPopOnReturn(unsigned bytes)
void setArgumentStackSize(unsigned size)
unsigned char classifyExternalReference(const Module &M) const
Classify a external variable reference for the current subtarget according to how we should reference...
unsigned char classifyBlockAddressReference() const
Classify a blockaddress reference for the current subtarget according to how we should reference it i...
unsigned getSlotSize() const
getSlotSize - Stack slot size in bytes.
const M68kInstrInfo * getInstrInfo() const override
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
Classify a global variable reference for the current subtarget according to how we should reference i...
unsigned getJumpTableEncoding() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
const M68kRegisterInfo * getRegisterInfo() const override
bool atLeastM68020() const
Definition: M68kSubtarget.h:89
unsigned char classifyGlobalFunctionReference(const GlobalValue *GV, const Module &M) const
Classify a global function reference for the current subtarget.
const M68kFrameLowering * getFrameLowering() const override
ConstraintType getConstraintType(StringRef ConstraintStr) const override
Given a constraint, return the type of constraint it is for this target.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
CCAssignFn * getCCAssignFn(CallingConv::ID CC, bool Return, bool IsVarArg) const
M68kTargetLowering(const M68kTargetMachine &TM, const M68kSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Context object for machine code objects.
Definition: MCContext.h:76
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:386
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1416
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setObjectZExt(int ObjectIdx, bool IsZExt)
void setObjectSExt(int ObjectIdx, bool IsSExt)
void setHasTailCall(bool V=true)
bool isObjectZExt(int ObjectIdx) const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isObjectSExt(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:526
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
int64_t getImm() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:721
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:478
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:731
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:472
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:473
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:773
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:675
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:768
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:469
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:799
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
Definition: SelectionDAG.h:485
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:737
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:554
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetOptions Options
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
use_iterator use_begin()
Definition: Value.h:360
self_iterator getIterator()
Definition: ilist_node.h:82
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:119
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ M68k_INTR
Used for M68k interrupt routines.
Definition: CallingConv.h:232
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:749
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:236
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1069
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1065
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:250
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1206
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:713
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1098
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1208
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1209