Bug Summary

File: llvm/include/llvm/CodeGen/MachineInstrBuilder.h
Warning: line 102, column 5
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name M68kISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/M68k -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/M68k -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem 
/usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/M68k -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp

1//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl ------*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the interfaces that M68k uses to lower LLVM code into a
11/// selection DAG.
12///
13//===----------------------------------------------------------------------===//
14
15#include "M68kISelLowering.h"
16#include "M68kCallingConv.h"
17#include "M68kMachineFunction.h"
18#include "M68kSubtarget.h"
19#include "M68kTargetMachine.h"
20#include "M68kTargetObjectFile.h"
21
22#include "llvm/ADT/Statistic.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineJumpTableInfo.h"
28#include "llvm/CodeGen/MachineRegisterInfo.h"
29#include "llvm/CodeGen/SelectionDAG.h"
30#include "llvm/CodeGen/ValueTypes.h"
31#include "llvm/IR/CallingConv.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/GlobalVariable.h"
34#include "llvm/Support/CommandLine.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/KnownBits.h"
38#include "llvm/Support/raw_ostream.h"
39
40using namespace llvm;
41
42#define DEBUG_TYPE "M68k-isel"
43
44STATISTIC(NumTailCalls, "Number of tail calls");
45
46M68kTargetLowering::M68kTargetLowering(const M68kTargetMachine &TM,
47 const M68kSubtarget &STI)
48 : TargetLowering(TM), Subtarget(STI), TM(TM) {
49
50 MVT PtrVT = MVT::i32;
51
52 setBooleanContents(ZeroOrOneBooleanContent);
53
54 auto *RegInfo = Subtarget.getRegisterInfo();
55 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
56
57 // Set up the register classes.
58 addRegisterClass(MVT::i8, &M68k::DR8RegClass);
59 addRegisterClass(MVT::i16, &M68k::XR16RegClass);
60 addRegisterClass(MVT::i32, &M68k::XR32RegClass);
61
62 for (auto VT : MVT::integer_valuetypes()) {
63 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
64 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
65 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
66 }
67
68 // We don't accept any truncstore of integer registers.
69 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
70 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
71 setTruncStoreAction(MVT::i64, MVT::i8, Expand);
72 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
73 setTruncStoreAction(MVT::i32, MVT::i8, Expand);
74 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
75
76 setOperationAction(ISD::MUL, MVT::i8, Promote);
77 setOperationAction(ISD::MUL, MVT::i16, Legal);
78 if (Subtarget.atLeastM68020())
79 setOperationAction(ISD::MUL, MVT::i32, Legal);
80 else
81 setOperationAction(ISD::MUL, MVT::i32, LibCall);
82 setOperationAction(ISD::MUL, MVT::i64, LibCall);
83
84 for (auto OP :
85 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::UDIVREM, ISD::SDIVREM,
86 ISD::MULHS, ISD::MULHU, ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
87 setOperationAction(OP, MVT::i8, Promote);
88 setOperationAction(OP, MVT::i16, Legal);
89 setOperationAction(OP, MVT::i32, LibCall);
90 }
91
92 for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
93 setOperationAction(OP, MVT::i8, Expand);
94 setOperationAction(OP, MVT::i16, Expand);
95 }
96
97 // FIXME It would be better to use a custom lowering
98 for (auto OP : {ISD::SMULO, ISD::UMULO}) {
99 setOperationAction(OP, MVT::i8, Expand);
100 setOperationAction(OP, MVT::i16, Expand);
101 setOperationAction(OP, MVT::i32, Expand);
102 }
103
104 // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
105 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
106 setOperationAction(ISD::ADDC, VT, Custom);
107 setOperationAction(ISD::ADDE, VT, Custom);
108 setOperationAction(ISD::SUBC, VT, Custom);
109 setOperationAction(ISD::SUBE, VT, Custom);
110 }
111
112 // SADDO and friends are legal with this setup, i hope
113 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
114 setOperationAction(ISD::SADDO, VT, Custom);
115 setOperationAction(ISD::UADDO, VT, Custom);
116 setOperationAction(ISD::SSUBO, VT, Custom);
117 setOperationAction(ISD::USUBO, VT, Custom);
118 }
119
120 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
121 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
122
123 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
124 setOperationAction(ISD::BR_CC, VT, Expand);
125 setOperationAction(ISD::SELECT, VT, Custom);
126 setOperationAction(ISD::SELECT_CC, VT, Expand);
127 setOperationAction(ISD::SETCC, VT, Custom);
128 setOperationAction(ISD::SETCCCARRY, VT, Custom);
129 }
130
131 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
132 setOperationAction(ISD::BSWAP, VT, Expand);
133 setOperationAction(ISD::CTTZ, VT, Expand);
134 setOperationAction(ISD::CTLZ, VT, Expand);
135 setOperationAction(ISD::CTPOP, VT, Expand);
136 }
137
138 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
139 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
140 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
141 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
142 setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
143 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
144
145 setOperationAction(ISD::VASTART, MVT::Other, Custom);
146 setOperationAction(ISD::VAEND, MVT::Other, Expand);
147 setOperationAction(ISD::VAARG, MVT::Other, Expand);
148 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
149
150 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
151 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
152
153 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
154
155 computeRegisterProperties(STI.getRegisterInfo());
156
157 // 2^2 bytes
158 // FIXME can it be just 2^1?
159 setMinFunctionAlignment(Align::Constant<2>());
160}
161
EVT M68kTargetLowering::getSetCCResultType(const DataLayout &DL,
                                           LLVMContext &Context, EVT VT) const {
  // M68k SETcc produces either 0x00 or 0xFF, so a byte is the natural result
  // type regardless of the compared operand type.
  return MVT::i8;
}
167
168MVT M68kTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
169 EVT Ty) const {
170 if (Ty.isSimple()) {
171 return Ty.getSimpleVT();
172 }
173 return MVT::getIntegerVT(8 * DL.getPointerSize(0));
174}
175
176#include "M68kGenCallingConv.inc"
177
/// How (if at all) a call or function signature returns a struct via sret:
/// not at all, with the sret pointer in a register, or on the stack.
enum StructReturnType { NotStructReturn, RegStructReturn, StackStructReturn };
179
180static StructReturnType
181callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
182 if (Outs.empty())
183 return NotStructReturn;
184
185 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
186 if (!Flags.isSRet())
187 return NotStructReturn;
188 if (Flags.isInReg())
189 return RegStructReturn;
190 return StackStructReturn;
191}
192
193/// Determines whether a function uses struct return semantics.
194static StructReturnType
195argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
196 if (Ins.empty())
197 return NotStructReturn;
198
199 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
200 if (!Flags.isSRet())
201 return NotStructReturn;
202 if (Flags.isInReg())
203 return RegStructReturn;
204 return StackStructReturn;
205}
206
207/// Make a copy of an aggregate at address specified by "Src" to address
208/// "Dst" with size and alignment information specified by the specific
209/// parameter attribute. The copy will be passed as a byval function parameter.
210static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
211 SDValue Chain, ISD::ArgFlagsTy Flags,
212 SelectionDAG &DAG, const SDLoc &DL) {
213 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
214
215 return DAG.getMemcpy(
216 Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
217 /*isVolatile=*/false, /*AlwaysInline=*/true,
218 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
219}
220
/// Return true if the calling convention is one that we can guarantee TCO for.
/// M68k currently guarantees TCO for no calling convention at all.
static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }
223
224/// Return true if we might ever do TCO for calls with this calling convention.
225static bool mayTailCallThisCC(CallingConv::ID CC) {
226 switch (CC) {
227 // C calling conventions:
228 case CallingConv::C:
229 return true;
230 default:
231 return canGuaranteeTCO(CC);
232 }
233}
234
235/// Return true if the function is being made into a tailcall target by
236/// changing its ABI.
237static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
238 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
239}
240
/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack, so a
/// tail call can reuse the slot instead of copying the value.
static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
                                const MachineRegisterInfo *MRI,
                                const M68kInstrInfo *TII,
                                const CCValAssign &VA) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;

  for (;;) {
    // Look through nodes that don't alter the bits of the incoming value.
    unsigned Op = Arg.getOpcode();
    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
      Arg = Arg.getOperand(0);
      continue;
    }
    if (Op == ISD::TRUNCATE) {
      // A truncate of an AssertZext back to the truncated width is also a
      // bit-preserving wrapper; peel it off and keep looking.
      const SDValue &TruncInput = Arg.getOperand(0);
      if (TruncInput.getOpcode() == ISD::AssertZext &&
          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
              Arg.getValueType()) {
        Arg = TruncInput.getOperand(0);
        continue;
      }
    }
    break;
  }

  // INT_MAX acts as a sentinel meaning "no frame index identified yet".
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    // The value arrived in a virtual register: trace it back to a stack-slot
    // load (non-byval) or a LEA of a frame index (byval).
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!Register::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
    FI = FINode->getIndex();
    Bytes = Flags.getByValSize();
  } else
    return false;

  assert(FI != INT_MAX);
  // Only fixed objects live in the caller-owned incoming-argument area.
  if (!MFI.isFixedObjectIndex(FI))
    return false;

  if (Offset != MFI.getObjectOffset(FI))
    return false;

  if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
    // If the argument location is wider than the argument type, check that any
    // extension flags match.
    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
        Flags.isSExt() != MFI.isObjectSExt(FI)) {
      return false;
    }
  }

  return Bytes == MFI.getObjectSize(FI);
}
327
328SDValue
329M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
330 MachineFunction &MF = DAG.getMachineFunction();
331 M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
332 int ReturnAddrIndex = FuncInfo->getRAIndex();
333
334 if (ReturnAddrIndex == 0) {
335 // Set up a frame object for the return address.
336 unsigned SlotSize = Subtarget.getSlotSize();
337 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
338 SlotSize, -(int64_t)SlotSize, false);
339 FuncInfo->setRAIndex(ReturnAddrIndex);
340 }
341
342 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
343}
344
345SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
346 SDValue &OutRetAddr,
347 SDValue Chain,
348 bool IsTailCall, int FPDiff,
349 const SDLoc &DL) const {
350 EVT VT = getPointerTy(DAG.getDataLayout());
351 OutRetAddr = getReturnAddressFrameIndex(DAG);
352
353 // Load the "old" Return address.
354 OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
355 return SDValue(OutRetAddr.getNode(), 1);
356}
357
358SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
359 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
360 EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
361 if (!FPDiff)
362 return Chain;
363
364 // Calculate the new stack slot for the return address.
365 int NewFO = MF.getFrameInfo().CreateFixedObject(
366 SlotSize, (int64_t)FPDiff - SlotSize, false);
367
368 SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
369 // Store the return address to the appropriate stack slot.
370 Chain = DAG.getStore(
371 Chain, DL, RetFI, NewFI,
372 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFO));
373 return Chain;
374}
375
/// Lower an incoming call argument that the calling convention assigned to
/// memory: create a fixed frame object at the slot's offset and produce
/// either the slot's address (byval aggregates) or a load of the value.
SDValue
M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     const SDLoc &DL, SelectionDAG &DAG,
                                     const CCValAssign &VA,
                                     MachineFrameInfo &MFI,
                                     unsigned ArgIdx) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
  EVT ValVT;

  // If value is passed by pointer we have address passed instead of the value
  // itself.
  if (VA.getLocInfo() == CCValAssign::Indirect)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // Because we are dealing with BE architecture we need to offset loading of
  // partial types: a narrow value occupies the high-address bytes of its
  // 4-byte slot, so point at its actual location within the slot.
  int Offset = VA.getLocMemOffset();
  if (VA.getValVT() == MVT::i8) {
    Offset += 3;
  } else if (VA.getValVT() == MVT::i16) {
    Offset += 2;
  }

  // TODO Interrupt handlers
  // Calculate SP offset of interrupt parameter, re-arrange the slot normally
  // taken by a return address.

  // FIXME For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis. In case of tail call optimization mark all
  // arguments mutable. Since they could be overwritten by lowering of arguments
  // in case of a tail call.
  bool AlwaysUseMutable = shouldGuaranteeTCO(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();

  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0)
      Bytes = 1; // Don't create zero-sized stack objects.
    int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
    // TODO Interrupt handlers
    // Adjust SP offset of interrupt parameter.
    // byval: the caller passes the aggregate in place, so the argument value
    // is simply the slot's address.
    return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
  } else {
    int FI =
        MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);

    // Set SExt or ZExt flag so later slot-reuse checks (e.g. for tail calls)
    // can verify extension compatibility.
    if (VA.getLocInfo() == CCValAssign::ZExt) {
      MFI.setObjectZExt(FI, true);
    } else if (VA.getLocInfo() == CCValAssign::SExt) {
      MFI.setObjectSExt(FI, true);
    }

    // TODO Interrupt handlers
    // Adjust SP offset of interrupt parameter.

    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue Val = DAG.getLoad(
        ValVT, DL, Chain, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    // If the value was extended into a wider location, truncate it back to
    // the declared argument type.
    return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
                           : Val;
  }
}
445
446SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
447 SDValue Arg, const SDLoc &DL,
448 SelectionDAG &DAG,
449 const CCValAssign &VA,
450 ISD::ArgFlagsTy Flags) const {
451 unsigned LocMemOffset = VA.getLocMemOffset();
452 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
453 PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
454 StackPtr, PtrOff);
455 if (Flags.isByVal())
456 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);
457
458 return DAG.getStore(
459 Chain, DL, Arg, PtrOff,
460 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
461}
462
463//===----------------------------------------------------------------------===//
464// Call
465//===----------------------------------------------------------------------===//
466
467SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
468 SmallVectorImpl<SDValue> &InVals) const {
469 SelectionDAG &DAG = CLI.DAG;
470 SDLoc &DL = CLI.DL;
471 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
472 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
473 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
474 SDValue Chain = CLI.Chain;
475 SDValue Callee = CLI.Callee;
476 CallingConv::ID CallConv = CLI.CallConv;
477 bool &IsTailCall = CLI.IsTailCall;
478 bool IsVarArg = CLI.IsVarArg;
479
480 MachineFunction &MF = DAG.getMachineFunction();
481 StructReturnType SR = callIsStructReturn(Outs);
482 bool IsSibcall = false;
483 M68kMachineFunctionInfo *MFI = MF.getInfo<M68kMachineFunctionInfo>();
484 // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
485
486 if (CallConv == CallingConv::M68k_INTR)
487 report_fatal_error("M68k interrupts may not be called directly");
488
489 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
490 if (Attr.getValueAsString() == "true")
491 IsTailCall = false;
492
493 // FIXME Add tailcalls support
494
495 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
496 if (IsMustTail) {
497 // Force this to be a tail call. The verifier rules are enough to ensure
498 // that we can lower this successfully without moving the return address
499 // around.
500 IsTailCall = true;
501 } else if (IsTailCall) {
502 // Check if it's really possible to do a tail call.
503 IsTailCall = IsEligibleForTailCallOptimization(
504 Callee, CallConv, IsVarArg, SR != NotStructReturn,
505 MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
506 DAG);
507
508 // Sibcalls are automatically detected tailcalls which do not require
509 // ABI changes.
510 if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
511 IsSibcall = true;
512
513 if (IsTailCall)
514 ++NumTailCalls;
515 }
516
517 assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&((!(IsVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc") ? static_cast
<void> (0) : __assert_fail ("!(IsVarArg && canGuaranteeTCO(CallConv)) && \"Var args not supported with calling convention fastcc\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 518, __PRETTY_FUNCTION__))
518 "Var args not supported with calling convention fastcc")((!(IsVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc") ? static_cast
<void> (0) : __assert_fail ("!(IsVarArg && canGuaranteeTCO(CallConv)) && \"Var args not supported with calling convention fastcc\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 518, __PRETTY_FUNCTION__))
;
519
520 // Analyze operands of the call, assigning locations to each operand.
521 SmallVector<CCValAssign, 16> ArgLocs;
522 // It is empty for LibCall
523 const Function *CalleeFunc = CLI.CB ? CLI.CB->getCalledFunction() : nullptr;
524 M68kCCState CCInfo(*CalleeFunc, CallConv, IsVarArg, MF, ArgLocs,
525 *DAG.getContext());
526 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
527
528 // Get a count of how many bytes are to be pushed on the stack.
529 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
530 if (IsSibcall) {
531 // This is a sibcall. The memory operands are available in caller's
532 // own caller's stack.
533 NumBytes = 0;
534 } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
535 canGuaranteeTCO(CallConv)) {
536 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
537 }
538
539 int FPDiff = 0;
540 if (IsTailCall && !IsSibcall && !IsMustTail) {
541 // Lower arguments at fp - stackoffset + fpdiff.
542 unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();
543
544 FPDiff = NumBytesCallerPushed - NumBytes;
545
546 // Set the delta of movement of the returnaddr stackslot.
547 // But only set if delta is greater than previous delta.
548 if (FPDiff < MFI->getTCReturnAddrDelta())
549 MFI->setTCReturnAddrDelta(FPDiff);
550 }
551
552 unsigned NumBytesToPush = NumBytes;
553 unsigned NumBytesToPop = NumBytes;
554
555 // If we have an inalloca argument, all stack space has already been allocated
556 // for us and be right at the top of the stack. We don't support multiple
557 // arguments passed in memory when using inalloca.
558 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
559 NumBytesToPush = 0;
560 if (!ArgLocs.back().isMemLoc())
561 report_fatal_error("cannot use inalloca attribute on a register "
562 "parameter");
563 if (ArgLocs.back().getLocMemOffset() != 0)
564 report_fatal_error("any parameter with the inalloca attribute must be "
565 "the only memory argument");
566 }
567
568 if (!IsSibcall)
569 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
570 NumBytes - NumBytesToPush, DL);
571
572 SDValue RetFI;
573 // Load return address for tail calls.
574 if (IsTailCall && FPDiff)
575 Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);
576
577 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
578 SmallVector<SDValue, 8> MemOpChains;
579 SDValue StackPtr;
580
581 // Walk the register/memloc assignments, inserting copies/loads. In the case
582 // of tail call optimization arguments are handle later.
583 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
584 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
585 ISD::ArgFlagsTy Flags = Outs[i].Flags;
586
587 // Skip inalloca arguments, they have already been written.
588 if (Flags.isInAlloca())
589 continue;
590
591 CCValAssign &VA = ArgLocs[i];
592 EVT RegVT = VA.getLocVT();
593 SDValue Arg = OutVals[i];
594 bool IsByVal = Flags.isByVal();
595
596 // Promote the value if needed.
597 switch (VA.getLocInfo()) {
598 default:
599 llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 599)
;
600 case CCValAssign::Full:
601 break;
602 case CCValAssign::SExt:
603 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
604 break;
605 case CCValAssign::ZExt:
606 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
607 break;
608 case CCValAssign::AExt:
609 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
610 break;
611 case CCValAssign::BCvt:
612 Arg = DAG.getBitcast(RegVT, Arg);
613 break;
614 case CCValAssign::Indirect: {
615 // Store the argument.
616 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
617 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
618 Chain = DAG.getStore(
619 Chain, DL, Arg, SpillSlot,
620 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
621 Arg = SpillSlot;
622 break;
623 }
624 }
625
626 if (VA.isRegLoc()) {
627 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
628 } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
629 assert(VA.isMemLoc())((VA.isMemLoc()) ? static_cast<void> (0) : __assert_fail
("VA.isMemLoc()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 629, __PRETTY_FUNCTION__))
;
630 if (!StackPtr.getNode()) {
631 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
632 getPointerTy(DAG.getDataLayout()));
633 }
634 MemOpChains.push_back(
635 LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
636 }
637 }
638
639 if (!MemOpChains.empty())
640 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
641
642 // FIXME Make sure PIC style GOT works as expected
643 // The only time GOT is really needed is for Medium-PIC static data
644 // otherwise we are happy with pc-rel or static references
645
646 if (IsVarArg && IsMustTail) {
647 const auto &Forwards = MFI->getForwardedMustTailRegParms();
648 for (const auto &F : Forwards) {
649 SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
650 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
651 }
652 }
653
654 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
655 // don't need this because the eligibility check rejects calls that require
656 // shuffling arguments passed in memory.
657 if (!IsSibcall && IsTailCall) {
658 // Force all the incoming stack arguments to be loaded from the stack
659 // before any new outgoing arguments are stored to the stack, because the
660 // outgoing stack slots may alias the incoming argument stack slots, and
661 // the alias isn't otherwise explicit. This is slightly more conservative
662 // than necessary, because it means that each store effectively depends
663 // on every argument instead of just those arguments it would clobber.
664 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
665
666 SmallVector<SDValue, 8> MemOpChains2;
667 SDValue FIN;
668 int FI = 0;
669 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
670 CCValAssign &VA = ArgLocs[i];
671 if (VA.isRegLoc())
672 continue;
673 assert(VA.isMemLoc())((VA.isMemLoc()) ? static_cast<void> (0) : __assert_fail
("VA.isMemLoc()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 673, __PRETTY_FUNCTION__))
;
674 SDValue Arg = OutVals[i];
675 ISD::ArgFlagsTy Flags = Outs[i].Flags;
676 // Skip inalloca arguments. They don't require any work.
677 if (Flags.isInAlloca())
678 continue;
679 // Create frame index.
680 int32_t Offset = VA.getLocMemOffset() + FPDiff;
681 uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
682 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
683 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
684
685 if (Flags.isByVal()) {
686 // Copy relative to framepointer.
687 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), DL);
688 if (!StackPtr.getNode()) {
689 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
690 getPointerTy(DAG.getDataLayout()));
691 }
692 Source = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
693 StackPtr, Source);
694
695 MemOpChains2.push_back(
696 CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
697 } else {
698 // Store relative to framepointer.
699 MemOpChains2.push_back(DAG.getStore(
700 ArgChain, DL, Arg, FIN,
701 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
702 }
703 }
704
705 if (!MemOpChains2.empty())
706 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);
707
708 // Store the return address to the appropriate stack slot.
709 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
710 getPointerTy(DAG.getDataLayout()),
711 Subtarget.getSlotSize(), FPDiff, DL);
712 }
713
714 // Build a sequence of copy-to-reg nodes chained together with token chain
715 // and flag operands which copy the outgoing args into registers.
716 SDValue InFlag;
717 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
718 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
719 RegsToPass[i].second, InFlag);
720 InFlag = Chain.getValue(1);
721 }
722
723 if (Callee->getOpcode() == ISD::GlobalAddress) {
724 // If the callee is a GlobalAddress node (quite common, every direct call
725 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
726 // it.
727 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
728
729 // We should use extra load for direct calls to dllimported functions in
730 // non-JIT mode.
731 const GlobalValue *GV = G->getGlobal();
732 if (!GV->hasDLLImportStorageClass()) {
733 unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
734
735 Callee = DAG.getTargetGlobalAddress(
736 GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
737
738 if (OpFlags == M68kII::MO_GOTPCREL) {
739
740 // Add a wrapper.
741 Callee = DAG.getNode(M68kISD::WrapperPC, DL,
742 getPointerTy(DAG.getDataLayout()), Callee);
743
744 // Add extra indirection
745 Callee = DAG.getLoad(
746 getPointerTy(DAG.getDataLayout()), DL, DAG.getEntryNode(), Callee,
747 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
748 }
749 }
750 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
751 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
752 unsigned char OpFlags =
753 Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
754
755 Callee = DAG.getTargetExternalSymbol(
756 S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
757 }
758
759 // Returns a chain & a flag for retval copy to use.
760 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
761 SmallVector<SDValue, 8> Ops;
762
763 if (!IsSibcall && IsTailCall) {
764 Chain = DAG.getCALLSEQ_END(Chain,
765 DAG.getIntPtrConstant(NumBytesToPop, DL, true),
766 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
767 InFlag = Chain.getValue(1);
768 }
769
770 Ops.push_back(Chain);
771 Ops.push_back(Callee);
772
773 if (IsTailCall)
774 Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));
775
776 // Add argument registers to the end of the list so that they are known live
777 // into the call.
778 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
779 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
780 RegsToPass[i].second.getValueType()));
781
782 // Add a register mask operand representing the call-preserved registers.
783 const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
784 assert(Mask && "Missing call preserved mask for calling convention")((Mask && "Missing call preserved mask for calling convention"
) ? static_cast<void> (0) : __assert_fail ("Mask && \"Missing call preserved mask for calling convention\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 784, __PRETTY_FUNCTION__))
;
785
786 Ops.push_back(DAG.getRegisterMask(Mask));
787
788 if (InFlag.getNode())
789 Ops.push_back(InFlag);
790
791 if (IsTailCall) {
792 MF.getFrameInfo().setHasTailCall();
793 return DAG.getNode(M68kISD::TC_RETURN, DL, NodeTys, Ops);
794 }
795
796 Chain = DAG.getNode(M68kISD::CALL, DL, NodeTys, Ops);
797 InFlag = Chain.getValue(1);
798
799 // Create the CALLSEQ_END node.
800 unsigned NumBytesForCalleeToPop;
801 if (M68k::isCalleePop(CallConv, IsVarArg,
802 DAG.getTarget().Options.GuaranteedTailCallOpt)) {
803 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
804 } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
805 // If this is a call to a struct-return function, the callee
806 // pops the hidden struct pointer, so we have to push it back.
807 NumBytesForCalleeToPop = 4;
808 } else {
809 NumBytesForCalleeToPop = 0; // Callee pops nothing.
810 }
811
812 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
813 // No need to reset the stack after the call if the call doesn't return. To
814 // make the MI verify, we'll pretend the callee does it for us.
815 NumBytesForCalleeToPop = NumBytes;
816 }
817
818 // Returns a flag for retval copy to use.
819 if (!IsSibcall) {
820 Chain = DAG.getCALLSEQ_END(
821 Chain, DAG.getIntPtrConstant(NumBytesToPop, DL, true),
822 DAG.getIntPtrConstant(NumBytesForCalleeToPop, DL, true), InFlag, DL);
823 InFlag = Chain.getValue(1);
824 }
825
826 // Handle result values, copying them out of physregs into vregs that we
827 // return.
828 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
829 InVals);
830}
831
832SDValue M68kTargetLowering::LowerCallResult(
833 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
834 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
835 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
836
837 // Assign locations to each value returned by this call.
838 SmallVector<CCValAssign, 16> RVLocs;
839 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
840 *DAG.getContext());
841 CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);
842
843 // Copy all of the result registers out of their specified physreg.
844 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
845 CCValAssign &VA = RVLocs[i];
846 EVT CopyVT = VA.getLocVT();
847
848 /// ??? is this correct?
849 Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InFlag)
850 .getValue(1);
851 SDValue Val = Chain.getValue(0);
852
853 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
854 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
855
856 InFlag = Chain.getValue(2);
857 InVals.push_back(Val);
858 }
859
860 return Chain;
861}
862
863//===----------------------------------------------------------------------===//
864// Formal Arguments Calling Convention Implementation
865//===----------------------------------------------------------------------===//
866
867SDValue M68kTargetLowering::LowerFormalArguments(
868 SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
869 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
870 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
871 MachineFunction &MF = DAG.getMachineFunction();
872 M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
873 // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();
874
875 MachineFrameInfo &MFI = MF.getFrameInfo();
876
877 // Assign locations to all of the incoming arguments.
878 SmallVector<CCValAssign, 16> ArgLocs;
879 M68kCCState CCInfo(MF.getFunction(), CCID, IsVarArg, MF, ArgLocs,
880 *DAG.getContext());
881
882 CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);
883
884 unsigned LastVal = ~0U;
885 SDValue ArgValue;
886 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
887 CCValAssign &VA = ArgLocs[i];
888 assert(VA.getValNo() != LastVal && "Same value in different locations")((VA.getValNo() != LastVal && "Same value in different locations"
) ? static_cast<void> (0) : __assert_fail ("VA.getValNo() != LastVal && \"Same value in different locations\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 888, __PRETTY_FUNCTION__))
;
889
890 LastVal = VA.getValNo();
891
892 if (VA.isRegLoc()) {
893 EVT RegVT = VA.getLocVT();
894 const TargetRegisterClass *RC;
895 if (RegVT == MVT::i32)
896 RC = &M68k::XR32RegClass;
897 else
898 llvm_unreachable("Unknown argument type!")::llvm::llvm_unreachable_internal("Unknown argument type!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 898)
;
899
900 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
901 ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
902
903 // If this is an 8 or 16-bit value, it is really passed promoted to 32
904 // bits. Insert an assert[sz]ext to capture this, then truncate to the
905 // right size.
906 if (VA.getLocInfo() == CCValAssign::SExt) {
907 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
908 DAG.getValueType(VA.getValVT()));
909 } else if (VA.getLocInfo() == CCValAssign::ZExt) {
910 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
911 DAG.getValueType(VA.getValVT()));
912 } else if (VA.getLocInfo() == CCValAssign::BCvt) {
913 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
914 }
915
916 if (VA.isExtInLoc()) {
917 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
918 }
919 } else {
920 assert(VA.isMemLoc())((VA.isMemLoc()) ? static_cast<void> (0) : __assert_fail
("VA.isMemLoc()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 920, __PRETTY_FUNCTION__))
;
921 ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
922 }
923
924 // If value is passed via pointer - do a load.
925 // TODO Make sure this handling on indirect arguments is correct
926 if (VA.getLocInfo() == CCValAssign::Indirect)
927 ArgValue =
928 DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());
929
930 InVals.push_back(ArgValue);
931 }
932
933 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
934 // Swift calling convention does not require we copy the sret argument
935 // into %D0 for the return. We don't set SRetReturnReg for Swift.
936 if (CCID == CallingConv::Swift)
937 continue;
938
939 // ABI require that for returning structs by value we copy the sret argument
940 // into %D0 for the return. Save the argument into a virtual register so
941 // that we can access it from the return points.
942 if (Ins[i].Flags.isSRet()) {
943 unsigned Reg = MMFI->getSRetReturnReg();
944 if (!Reg) {
945 MVT PtrTy = getPointerTy(DAG.getDataLayout());
946 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
947 MMFI->setSRetReturnReg(Reg);
948 }
949 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
950 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
951 break;
952 }
953 }
954
955 unsigned StackSize = CCInfo.getNextStackOffset();
956 // Align stack specially for tail calls.
957 if (shouldGuaranteeTCO(CCID, MF.getTarget().Options.GuaranteedTailCallOpt))
958 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
959
960 // If the function takes variable number of arguments, make a frame index for
961 // the start of the first vararg value... for expansion of llvm.va_start. We
962 // can skip this if there are no va_start calls.
963 if (MFI.hasVAStart()) {
964 MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
965 }
966
967 if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
968 // We forward some GPRs and some vector types.
969 SmallVector<MVT, 2> RegParmTypes;
970 MVT IntVT = MVT::i32;
971 RegParmTypes.push_back(IntVT);
972
973 // Compute the set of forwarded registers. The rest are scratch.
974 // ??? what is this for?
975 SmallVectorImpl<ForwardedRegister> &Forwards =
976 MMFI->getForwardedMustTailRegParms();
977 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);
978
979 // Copy all forwards from physical to virtual registers.
980 for (ForwardedRegister &F : Forwards) {
981 // FIXME Can we use a less constrained schedule?
982 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
983 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
984 Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
985 }
986 }
987
988 // Some CCs need callee pop.
989 if (M68k::isCalleePop(CCID, IsVarArg,
990 MF.getTarget().Options.GuaranteedTailCallOpt)) {
991 MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
992 } else {
993 MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
994 // If this is an sret function, the return should pop the hidden pointer.
995 if (!canGuaranteeTCO(CCID) && argsAreStructReturn(Ins) == StackStructReturn)
996 MMFI->setBytesToPopOnReturn(4);
997 }
998
999 MMFI->setArgumentStackSize(StackSize);
1000
1001 return Chain;
1002}
1003
1004//===----------------------------------------------------------------------===//
1005// Return Value Calling Convention Implementation
1006//===----------------------------------------------------------------------===//
1007
1008SDValue
1009M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
1010 bool IsVarArg,
1011 const SmallVectorImpl<ISD::OutputArg> &Outs,
1012 const SmallVectorImpl<SDValue> &OutVals,
1013 const SDLoc &DL, SelectionDAG &DAG) const {
1014 MachineFunction &MF = DAG.getMachineFunction();
1015 M68kMachineFunctionInfo *MFI = MF.getInfo<M68kMachineFunctionInfo>();
1016
1017 SmallVector<CCValAssign, 16> RVLocs;
1018 CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
1019 CCInfo.AnalyzeReturn(Outs, RetCC_M68k);
1020
1021 SDValue Flag;
1022 SmallVector<SDValue, 6> RetOps;
1023 // Operand #0 = Chain (updated below)
1024 RetOps.push_back(Chain);
1025 // Operand #1 = Bytes To Pop
1026 RetOps.push_back(
1027 DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32));
1028
1029 // Copy the result values into the output registers.
1030 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1031 CCValAssign &VA = RVLocs[i];
1032 assert(VA.isRegLoc() && "Can only return in registers!")((VA.isRegLoc() && "Can only return in registers!") ?
static_cast<void> (0) : __assert_fail ("VA.isRegLoc() && \"Can only return in registers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1032, __PRETTY_FUNCTION__))
;
1033 SDValue ValToCopy = OutVals[i];
1034 EVT ValVT = ValToCopy.getValueType();
1035
1036 // Promote values to the appropriate types.
1037 if (VA.getLocInfo() == CCValAssign::SExt)
1038 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1039 else if (VA.getLocInfo() == CCValAssign::ZExt)
1040 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
1041 else if (VA.getLocInfo() == CCValAssign::AExt) {
1042 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
1043 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1044 else
1045 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
1046 } else if (VA.getLocInfo() == CCValAssign::BCvt)
1047 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
1048
1049 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Flag);
1050 Flag = Chain.getValue(1);
1051 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1052 }
1053
1054 // Swift calling convention does not require we copy the sret argument
1055 // into %d0 for the return, and SRetReturnReg is not set for Swift.
1056
1057 // ABI require that for returning structs by value we copy the sret argument
1058 // into %D0 for the return. Save the argument into a virtual register so that
1059 // we can access it from the return points.
1060 //
1061 // Checking Function.hasStructRetAttr() here is insufficient because the IR
1062 // may not have an explicit sret argument. If MFI.CanLowerReturn is
1063 // false, then an sret argument may be implicitly inserted in the SelDAG. In
1064 // either case MFI->setSRetReturnReg() will have been called.
1065 if (unsigned SRetReg = MFI->getSRetReturnReg()) {
1066 // ??? Can i just move this to the top and escape this explanation?
1067 // When we have both sret and another return value, we should use the
1068 // original Chain stored in RetOps[0], instead of the current Chain updated
1069 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
1070
1071 // For the case of sret and another return value, we have
1072 // Chain_0 at the function entry
1073 // Chain_1 = getCopyToReg(Chain_0) in the above loop
1074 // If we use Chain_1 in getCopyFromReg, we will have
1075 // Val = getCopyFromReg(Chain_1)
1076 // Chain_2 = getCopyToReg(Chain_1, Val) from below
1077
1078 // getCopyToReg(Chain_0) will be glued together with
1079 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
1080 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
1081 // Data dependency from Unit B to Unit A due to usage of Val in
1082 // getCopyToReg(Chain_1, Val)
1083 // Chain dependency from Unit A to Unit B
1084
1085 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
1086 SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
1087 getPointerTy(MF.getDataLayout()));
1088
1089 // ??? How will this work if CC does not use registers for args passing?
1090 // ??? What if I return multiple structs?
1091 unsigned RetValReg = M68k::D0;
1092 Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
1093 Flag = Chain.getValue(1);
1094
1095 RetOps.push_back(
1096 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
1097 }
1098
1099 RetOps[0] = Chain; // Update chain.
1100
1101 // Add the flag if we have it.
1102 if (Flag.getNode())
1103 RetOps.push_back(Flag);
1104
1105 return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
1106}
1107
1108//===----------------------------------------------------------------------===//
1109// Fast Calling Convention (tail call) implementation
1110//===----------------------------------------------------------------------===//
1111
1112// Like std call, callee cleans arguments, convention except that ECX is
1113// reserved for storing the tail called function address. Only 2 registers are
1114// free for argument passing (inreg). Tail call optimization is performed
1115// provided:
1116// * tailcallopt is enabled
1117// * caller/callee are fastcc
1118// On M68k_64 architecture with GOT-style position independent code only
1119// local (within module) calls are supported at the moment. To keep the stack
1120// aligned according to platform abi the function GetAlignedArgumentStackSize
1121// ensures that argument delta is always multiples of stack alignment. (Dynamic
1122// linkers need this - darwin's dyld for example) If a tail called function
1123// callee has more arguments than the caller the caller needs to make sure that
1124// there is room to move the RETADDR to. This is achieved by reserving an area
1125// the size of the argument delta right after the original RETADDR, but before
1126// the saved framepointer or the spilled registers e.g. caller(arg1, arg2)
1127// calls callee(arg1, arg2,arg3,arg4) stack layout:
1128// arg1
1129// arg2
1130// RETADDR
1131// [ new RETADDR
1132// move area ]
1133// (possible EBP)
1134// ESI
1135// EDI
1136// local1 ..
1137
1138/// Make the stack size align e.g 16n + 12 aligned for a 16-byte align
1139/// requirement.
1140unsigned
1141M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1142 SelectionDAG &DAG) const {
1143 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
1144 unsigned StackAlignment = TFI.getStackAlignment();
1145 uint64_t AlignMask = StackAlignment - 1;
1146 int64_t Offset = StackSize;
1147 unsigned SlotSize = Subtarget.getSlotSize();
1148 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1149 // Number smaller than 12 so just add the difference.
1150 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1151 } else {
1152 // Mask out lower bits, add stackalignment once plus the 12 bytes.
1153 Offset =
1154 ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
1155 }
1156 return Offset;
1157}
1158
1159/// Check whether the call is eligible for tail call optimization. Targets
1160/// that want to do tail call optimization should implement this function.
1161bool M68kTargetLowering::IsEligibleForTailCallOptimization(
1162 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
1163 bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
1164 const SmallVectorImpl<ISD::OutputArg> &Outs,
1165 const SmallVectorImpl<SDValue> &OutVals,
1166 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
1167 if (!mayTailCallThisCC(CalleeCC))
1168 return false;
1169
1170 // If -tailcallopt is specified, make fastcc functions tail-callable.
1171 MachineFunction &MF = DAG.getMachineFunction();
1172 const auto &CallerF = MF.getFunction();
1173
1174 CallingConv::ID CallerCC = CallerF.getCallingConv();
1175 bool CCMatch = CallerCC == CalleeCC;
1176
1177 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
1178 if (canGuaranteeTCO(CalleeCC) && CCMatch)
1179 return true;
1180 return false;
1181 }
1182
1183 // Look for obvious safe cases to perform tail call optimization that do not
1184 // require ABI changes. This is what gcc calls sibcall.
1185
1186 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
1187 // emit a special epilogue.
1188 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1189 if (RegInfo->hasStackRealignment(MF))
1190 return false;
1191
1192 // Also avoid sibcall optimization if either caller or callee uses struct
1193 // return semantics.
1194 if (IsCalleeStructRet || IsCallerStructRet)
1195 return false;
1196
1197 // Do not sibcall optimize vararg calls unless all arguments are passed via
1198 // registers.
1199 LLVMContext &C = *DAG.getContext();
1200 if (IsVarArg && !Outs.empty()) {
1201
1202 SmallVector<CCValAssign, 16> ArgLocs;
1203 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1204
1205 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1206 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1207 if (!ArgLocs[i].isRegLoc())
1208 return false;
1209 }
1210
1211 // Check that the call results are passed in the same way.
1212 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
1213 RetCC_M68k))
1214 return false;
1215
1216 // The callee has to preserve all registers the caller needs to preserve.
1217 const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
1218 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1219 if (!CCMatch) {
1220 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1221 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1222 return false;
1223 }
1224
1225 unsigned StackArgsSize = 0;
1226
1227 // If the callee takes no arguments then go on to check the results of the
1228 // call.
1229 if (!Outs.empty()) {
1230 // Check if stack adjustment is needed. For now, do not do this if any
1231 // argument is passed on the stack.
1232 SmallVector<CCValAssign, 16> ArgLocs;
1233 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1234
1235 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1236 StackArgsSize = CCInfo.getNextStackOffset();
1237
1238 if (CCInfo.getNextStackOffset()) {
1239 // Check if the arguments are already laid out in the right way as
1240 // the caller's fixed stack objects.
1241 MachineFrameInfo &MFI = MF.getFrameInfo();
1242 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1243 const M68kInstrInfo *TII = Subtarget.getInstrInfo();
1244 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1245 CCValAssign &VA = ArgLocs[i];
1246 SDValue Arg = OutVals[i];
1247 ISD::ArgFlagsTy Flags = Outs[i].Flags;
1248 if (VA.getLocInfo() == CCValAssign::Indirect)
1249 return false;
1250 if (!VA.isRegLoc()) {
1251 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
1252 TII, VA))
1253 return false;
1254 }
1255 }
1256 }
1257
1258 bool PositionIndependent = isPositionIndependent();
1259 // If the tailcall address may be in a register, then make sure it's
1260 // possible to register allocate for it. The call address can
1261 // only target %A0 or %A1 since the tail call must be scheduled after
1262 // callee-saved registers are restored. These happen to be the same
1263 // registers used to pass 'inreg' arguments so watch out for those.
1264 if ((!isa<GlobalAddressSDNode>(Callee) &&
1265 !isa<ExternalSymbolSDNode>(Callee)) ||
1266 PositionIndependent) {
1267 unsigned NumInRegs = 0;
1268 // In PIC we need an extra register to formulate the address computation
1269 // for the callee.
1270 unsigned MaxInRegs = PositionIndependent ? 1 : 2;
1271
1272 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1273 CCValAssign &VA = ArgLocs[i];
1274 if (!VA.isRegLoc())
1275 continue;
1276 unsigned Reg = VA.getLocReg();
1277 switch (Reg) {
1278 default:
1279 break;
1280 case M68k::A0:
1281 case M68k::A1:
1282 if (++NumInRegs == MaxInRegs)
1283 return false;
1284 break;
1285 }
1286 }
1287 }
1288
1289 const MachineRegisterInfo &MRI = MF.getRegInfo();
1290 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
1291 return false;
1292 }
1293
1294 bool CalleeWillPop = M68k::isCalleePop(
1295 CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);
1296
1297 if (unsigned BytesToPop =
1298 MF.getInfo<M68kMachineFunctionInfo>()->getBytesToPopOnReturn()) {
1299 // If we have bytes to pop, the callee must pop them.
1300 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
1301 if (!CalleePopMatches)
1302 return false;
1303 } else if (CalleeWillPop && StackArgsSize > 0) {
1304 // If we don't have bytes to pop, make sure the callee doesn't pop any.
1305 return false;
1306 }
1307
1308 return true;
1309}
1310
1311//===----------------------------------------------------------------------===//
1312// Custom Lower
1313//===----------------------------------------------------------------------===//
1314
1315SDValue M68kTargetLowering::LowerOperation(SDValue Op,
1316 SelectionDAG &DAG) const {
1317 switch (Op.getOpcode()) {
1318 default:
1319 llvm_unreachable("Should not custom lower this!")::llvm::llvm_unreachable_internal("Should not custom lower this!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1319)
;
1320 case ISD::SADDO:
1321 case ISD::UADDO:
1322 case ISD::SSUBO:
1323 case ISD::USUBO:
1324 case ISD::SMULO:
1325 case ISD::UMULO:
1326 return LowerXALUO(Op, DAG);
1327 case ISD::SETCC:
1328 return LowerSETCC(Op, DAG);
1329 case ISD::SETCCCARRY:
1330 return LowerSETCCCARRY(Op, DAG);
1331 case ISD::SELECT:
1332 return LowerSELECT(Op, DAG);
1333 case ISD::BRCOND:
1334 return LowerBRCOND(Op, DAG);
1335 case ISD::ADDC:
1336 case ISD::ADDE:
1337 case ISD::SUBC:
1338 case ISD::SUBE:
1339 return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
1340 case ISD::ConstantPool:
1341 return LowerConstantPool(Op, DAG);
1342 case ISD::GlobalAddress:
1343 return LowerGlobalAddress(Op, DAG);
1344 case ISD::ExternalSymbol:
1345 return LowerExternalSymbol(Op, DAG);
1346 case ISD::BlockAddress:
1347 return LowerBlockAddress(Op, DAG);
1348 case ISD::JumpTable:
1349 return LowerJumpTable(Op, DAG);
1350 case ISD::VASTART:
1351 return LowerVASTART(Op, DAG);
1352 case ISD::DYNAMIC_STACKALLOC:
1353 return LowerDYNAMIC_STACKALLOC(Op, DAG);
1354 }
1355}
1356
1357bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
1358 SDValue C) const {
1359 // Shifts and add instructions in M68000 and M68010 support
1360 // up to 32 bits, but mul only has 16-bit variant. So it's almost
1361 // certainly beneficial to lower 8/16/32-bit mul to their
1362 // add / shifts counterparts. But for 64-bits mul, it might be
1363 // safer to just leave it to compiler runtime implementations.
1364 return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
1365}
1366
1367SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
1368 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
1369 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
1370 // looks for this combo and may remove the "setcc" instruction if the "setcc"
1371 // has only one use.
1372 SDNode *N = Op.getNode();
1373 SDValue LHS = N->getOperand(0);
1374 SDValue RHS = N->getOperand(1);
1375 unsigned BaseOp = 0;
1376 unsigned Cond = 0;
1377 SDLoc DL(Op);
1378 switch (Op.getOpcode()) {
1379 default:
1380 llvm_unreachable("Unknown ovf instruction!")::llvm::llvm_unreachable_internal("Unknown ovf instruction!",
"/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1380)
;
1381 case ISD::SADDO:
1382 BaseOp = M68kISD::ADD;
1383 Cond = M68k::COND_VS;
1384 break;
1385 case ISD::UADDO:
1386 BaseOp = M68kISD::ADD;
1387 Cond = M68k::COND_CS;
1388 break;
1389 case ISD::SSUBO:
1390 BaseOp = M68kISD::SUB;
1391 Cond = M68k::COND_VS;
1392 break;
1393 case ISD::USUBO:
1394 BaseOp = M68kISD::SUB;
1395 Cond = M68k::COND_CS;
1396 break;
1397 }
1398
1399 // Also sets CCR.
1400 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i8);
1401 SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
1402 SDValue SetCC = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
1403 DAG.getConstant(Cond, DL, MVT::i8),
1404 SDValue(Arith.getNode(), 1));
1405
1406 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Arith, SetCC);
1407}
1408
1409/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
1410/// according to equal/not-equal condition code \p CC.
1411static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
1412 const SDLoc &DL, SelectionDAG &DAG) {
1413 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
1414 // instruction. Since the shift amount is in-range-or-undefined, we know
1415 // that doing a bittest on the i32 value is ok.
1416 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
1417 Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
1418
1419 // If the operand types disagree, extend the shift amount to match. Since
1420 // BT ignores high bits (like shifts) we can use anyextend.
1421 if (Src.getValueType() != BitNo.getValueType())
1422 BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
1423
1424 SDValue BT = DAG.getNode(M68kISD::BT, DL, MVT::i32, Src, BitNo);
1425
1426 // NOTE BTST sets CCR.Z flag
1427 M68k::CondCode Cond = CC == ISD::SETEQ ? M68k::COND_NE : M68k::COND_EQ;
1428 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
1429 DAG.getConstant(Cond, DL, MVT::i8), BT);
1430}
1431
1432/// Result of 'and' is compared against zero. Change to a BT node if possible.
1433static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL,
1434 SelectionDAG &DAG) {
1435 SDValue Op0 = And.getOperand(0);
1436 SDValue Op1 = And.getOperand(1);
1437 if (Op0.getOpcode() == ISD::TRUNCATE)
1438 Op0 = Op0.getOperand(0);
1439 if (Op1.getOpcode() == ISD::TRUNCATE)
1440 Op1 = Op1.getOperand(0);
1441
1442 SDValue LHS, RHS;
1443 if (Op1.getOpcode() == ISD::SHL)
1444 std::swap(Op0, Op1);
1445 if (Op0.getOpcode() == ISD::SHL) {
1446 if (isOneConstant(Op0.getOperand(0))) {
1447 // If we looked past a truncate, check that it's only truncating away
1448 // known zeros.
1449 unsigned BitWidth = Op0.getValueSizeInBits();
1450 unsigned AndBitWidth = And.getValueSizeInBits();
1451 if (BitWidth > AndBitWidth) {
1452 auto Known = DAG.computeKnownBits(Op0);
1453 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
1454 return SDValue();
1455 }
1456 LHS = Op1;
1457 RHS = Op0.getOperand(1);
1458 }
1459 } else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
1460 uint64_t AndRHSVal = AndRHS->getZExtValue();
1461 SDValue AndLHS = Op0;
1462
1463 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
1464 LHS = AndLHS.getOperand(0);
1465 RHS = AndLHS.getOperand(1);
1466 }
1467
1468 // Use BT if the immediate can't be encoded in a TEST instruction.
1469 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
1470 LHS = AndLHS;
1471 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
1472 }
1473 }
1474
1475 if (LHS.getNode())
1476 return getBitTestCondition(LHS, RHS, CC, DL, DAG);
1477
1478 return SDValue();
1479}
1480
1481static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode) {
1482 switch (SetCCOpcode) {
1483 default:
1484 llvm_unreachable("Invalid integer condition!")::llvm::llvm_unreachable_internal("Invalid integer condition!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1484)
;
1485 case ISD::SETEQ:
1486 return M68k::COND_EQ;
1487 case ISD::SETGT:
1488 return M68k::COND_GT;
1489 case ISD::SETGE:
1490 return M68k::COND_GE;
1491 case ISD::SETLT:
1492 return M68k::COND_LT;
1493 case ISD::SETLE:
1494 return M68k::COND_LE;
1495 case ISD::SETNE:
1496 return M68k::COND_NE;
1497 case ISD::SETULT:
1498 return M68k::COND_CS;
1499 case ISD::SETUGE:
1500 return M68k::COND_CC;
1501 case ISD::SETUGT:
1502 return M68k::COND_HI;
1503 case ISD::SETULE:
1504 return M68k::COND_LS;
1505 }
1506}
1507
/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
///
/// \param SetCCOpcode the generic condition being lowered.
/// \param IsFP        true for a floating-point comparison (LHS/RHS may be
///                    swapped in place to reach an encodable condition).
/// \param LHS, RHS    comparison operands; updated in place when the
///                    translation rewrites the comparison.
/// \returns an M68k::CondCode, or M68k::COND_INVALID for FP conditions that
///          cannot be expressed as a single CCR test (SETOEQ / SETUNE).
static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
                                bool IsFP, SDValue &LHS, SDValue &RHS,
                                SelectionDAG &DAG) {
  if (!IsFP) {
    // Integer path: a few constant-RHS comparisons can be rewritten into
    // cheaper sign-flag tests before falling back to the 1:1 table.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1 -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return M68k::COND_PL;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0 -> X == 0, jump on sign.
        return M68k::COND_MI;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
        // X < 1 -> X <= 0
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return M68k::COND_LE;
      }
    }

    return TranslateIntegerM68kCC(SetCCOpcode);
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
  if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  // Normalize the remaining conditions so that the table below only needs the
  // "flipped" variants noted in its comments.
  switch (SetCCOpcode) {
  default:
    break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  // ZF PF CF op
  // 0 | 0 | 0 | X > Y
  // 0 | 0 | 1 | X < Y
  // 1 | 0 | 0 | X == Y
  // 1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default:
    llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:
    return M68k::COND_EQ;
  case ISD::SETOLT: // flipped
  case ISD::SETOGT:
  case ISD::SETGT:
    return M68k::COND_HI;
  case ISD::SETOLE: // flipped
  case ISD::SETOGE:
  case ISD::SETGE:
    return M68k::COND_CC;
  case ISD::SETUGT: // flipped
  case ISD::SETULT:
  case ISD::SETLT:
    return M68k::COND_CS;
  case ISD::SETUGE: // flipped
  case ISD::SETULE:
  case ISD::SETLE:
    return M68k::COND_LS;
  case ISD::SETONE:
  case ISD::SETNE:
    return M68k::COND_NE;
  case ISD::SETOEQ:
  case ISD::SETUNE:
    // No single CCR condition expresses these; caller must bail out.
    return M68k::COND_INVALID;
  }
}
1590
1591// Convert (truncate (srl X, N) to i1) to (bt X, N)
1592static SDValue LowerTruncateToBT(SDValue Op, ISD::CondCode CC, const SDLoc &DL,
1593 SelectionDAG &DAG) {
1594
1595 assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&((Op.getOpcode() == ISD::TRUNCATE && Op.getValueType(
) == MVT::i1 && "Expected TRUNCATE to i1 node") ? static_cast
<void> (0) : __assert_fail ("Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 && \"Expected TRUNCATE to i1 node\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1596, __PRETTY_FUNCTION__))
1596 "Expected TRUNCATE to i1 node")((Op.getOpcode() == ISD::TRUNCATE && Op.getValueType(
) == MVT::i1 && "Expected TRUNCATE to i1 node") ? static_cast
<void> (0) : __assert_fail ("Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 && \"Expected TRUNCATE to i1 node\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1596, __PRETTY_FUNCTION__))
;
1597
1598 if (Op.getOperand(0).getOpcode() != ISD::SRL)
1599 return SDValue();
1600
1601 SDValue ShiftRight = Op.getOperand(0);
1602 return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
1603 CC, DL, DAG);
1604}
1605
1606/// \brief return true if \c Op has a use that doesn't just read flags.
1607static bool hasNonFlagsUse(SDValue Op) {
1608 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
1609 ++UI) {
1610 SDNode *User = *UI;
1611 unsigned UOpNo = UI.getOperandNo();
1612 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
1613 // Look pass truncate.
1614 UOpNo = User->use_begin().getOperandNo();
1615 User = *User->use_begin();
1616 }
1617
1618 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
1619 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
1620 return true;
1621 }
1622 return false;
1623}
1624
/// Emit nodes that will be selected as a "test Op, 0" (compare against zero),
/// reusing an existing CCR-producing arithmetic node when that is safe.
/// Returns an MVT::i8 flag result suitable for a SETCC/CMOV/BRCOND consumer.
SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
                                     const SDLoc &DL, SelectionDAG &DAG) const {

  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (M68kCC) {
  default:
    break;
  case M68k::COND_HI:
  case M68k::COND_CC:
  case M68k::COND_CS:
  case M68k::COND_LS:
    NeedCF = true;
    break;
  case M68k::COND_GT:
  case M68k::COND_GE:
  case M68k::COND_LT:
  case M68k::COND_LE:
  case M68k::COND_VS:
  case M68k::COND_VC: {
    // Check if we really need to set the
    // Overflow flag. If NoSignedWrap is present
    // that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL: {
      if (Op.getNode()->getFlags().hasNoSignedWrap())
        break;
      LLVM_FALLTHROUGH;
    }
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the CCR value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                       DAG.getConstant(0, DL, Op.getValueType()), Op);
  }
  // Opcode == 0 at the end means "no CCR-reusable node found; emit a CMP 0".
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  // Truncate operations may prevent the merge of the SETCC instruction
  // and the arithmetic instruction before it. Attempt to truncate the operands
  // of the arithmetic instruction and use a reduced bit-width instruction.
  bool NeedTruncation = false;
  SDValue ArithOp = Op;
  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
    SDValue Arith = Op->getOperand(0);
    // Both the trunc and the arithmetic op need to have one user each.
    if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
      default:
        break;
      case ISD::ADD:
      case ISD::SUB:
      case ISD::AND:
      case ISD::OR:
      case ISD::XOR: {
        NeedTruncation = true;
        ArithOp = Arith;
      }
      }
  }

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST. We use the variable 'Op', which is the
  // non-casted variable when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
    Opcode = M68kISD::ADD;
    NumOperands = 2;
    break;
  case ISD::SHL:
  case ISD::SRL:
    // If we have a constant logical shift that's only used in a comparison
    // against zero turn it into an equivalent AND. This allows turning it into
    // a TEST instruction later.
    if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
        Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
        !hasNonFlagsUse(Op)) {
      EVT VT = Op.getValueType();
      unsigned BitWidth = VT.getSizeInBits();
      unsigned ShAmt = Op->getConstantOperandVal(1);
      if (ShAmt >= BitWidth) // Avoid undefined shifts.
        break;
      APInt Mask = ArithOp.getOpcode() == ISD::SRL
                       ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
                       : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
      if (!Mask.isSignedIntN(32)) // Avoid large immediates.
        break;
      // Note: Op is rebound here; Opcode stays 0 so the AND below is tested
      // via the CMP-with-0 fallback.
      Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
                       DAG.getConstant(Mask, DL, VT));
    }
    break;

  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using
    // M68kISD::AND, because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op)) {
      SDValue Op0 = ArithOp->getOperand(0);
      SDValue Op1 = ArithOp->getOperand(1);
      EVT VT = ArithOp.getValueType();
      bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
      bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;

      // But if we can combine this into an ANDN operation, then create an AND
      // now and allow it to be pattern matched into an ANDN.
      if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
        break;
    }
    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
    for (const auto *U : Op.getNode()->uses())
      if (U->getOpcode() == ISD::STORE)
        goto default_case;

    // Otherwise use a regular CCR-setting instruction.
    switch (ArithOp.getOpcode()) {
    default:
      llvm_unreachable("unexpected operator!");
    case ISD::SUB:
      Opcode = M68kISD::SUB;
      break;
    case ISD::XOR:
      Opcode = M68kISD::XOR;
      break;
    case ISD::AND:
      Opcode = M68kISD::AND;
      break;
    case ISD::OR:
      Opcode = M68kISD::OR;
      break;
    }

    NumOperands = 2;
    break;
  case M68kISD::ADD:
  case M68kISD::SUB:
  case M68kISD::OR:
  case M68kISD::XOR:
  case M68kISD::AND:
    // Already a CCR-producing target node; just use its flag result (#1).
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  // If we found that truncation is beneficial, perform the truncation and
  // update 'Op'.
  if (NeedTruncation) {
    EVT VT = Op.getValueType();
    SDValue WideVal = Op->getOperand(0);
    EVT WideVT = WideVal.getValueType();
    unsigned ConvertedOp = 0;
    // Use a target machine opcode to prevent further DAGCombine
    // optimizations that may separate the arithmetic operations
    // from the setcc node.
    switch (WideVal.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      ConvertedOp = M68kISD::ADD;
      break;
    case ISD::SUB:
      ConvertedOp = M68kISD::SUB;
      break;
    case ISD::AND:
      ConvertedOp = M68kISD::AND;
      break;
    case ISD::OR:
      ConvertedOp = M68kISD::OR;
      break;
    case ISD::XOR:
      ConvertedOp = M68kISD::XOR;
      break;
    }

    if (ConvertedOp) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
        SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
        SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
        Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
      }
    }
  }

  if (Opcode == 0) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                       DAG.getConstant(0, DL, Op.getValueType()), Op);
  }
  // Rebuild the arithmetic as the flag-producing target node, replace all
  // uses of the original value with it, and hand back the flag result (#1).
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);

  SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
  DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}
1839
1840/// \brief Return true if the condition is an unsigned comparison operation.
1841static bool isM68kCCUnsigned(unsigned M68kCC) {
1842 switch (M68kCC) {
1843 default:
1844 llvm_unreachable("Invalid integer condition!")::llvm::llvm_unreachable_internal("Invalid integer condition!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 1844)
;
1845 case M68k::COND_EQ:
1846 case M68k::COND_NE:
1847 case M68k::COND_CS:
1848 case M68k::COND_HI:
1849 case M68k::COND_LS:
1850 case M68k::COND_CC:
1851 return true;
1852 case M68k::COND_GT:
1853 case M68k::COND_GE:
1854 case M68k::COND_LT:
1855 case M68k::COND_LE:
1856 return false;
1857 }
1858}
1859
/// Emit nodes that will be selected as "cmp Op0, Op1", producing an MVT::i8
/// flag result. Comparisons against zero are delegated to EmitTest so the
/// flags of an existing arithmetic node can be reused.
SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  if (isNullConstant(Op1))
    return EmitTest(Op0, M68kCC, DL, DAG);

  assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
         "Unexpected comparison operation for MVT::i1 operands");

  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Only promote the compare up to I32 if it is a 16 bit operation
    // with an immediate. 16 bit immediates are to be avoided.
    if ((Op0.getValueType() == MVT::i16 &&
         (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
        !DAG.getMachineFunction().getFunction().hasMinSize()) {
      // Extend signed-ness must match the comparison's signed-ness so the
      // widened compare produces the same flag outcome.
      unsigned ExtendOp =
          isM68kCCUnsigned(M68kCC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
      Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
    }
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
    SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  // Non-standard integer widths fall back to an explicit CMP node.
  return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
}
1887
1888/// Result of 'and' or 'trunc to i1' is compared against zero.
1889/// Change to a BT node if possible.
1890SDValue M68kTargetLowering::LowerToBT(SDValue Op, ISD::CondCode CC,
1891 const SDLoc &DL,
1892 SelectionDAG &DAG) const {
1893 if (Op.getOpcode() == ISD::AND)
1894 return LowerAndToBT(Op, CC, DL, DAG);
1895 if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
1896 return LowerTruncateToBT(Op, CC, DL, DAG);
1897 return SDValue();
1898}
1899
/// Lower an ISD::SETCC to M68k nodes: try the bit-test peepholes first, then
/// simplify setcc-of-setcc and i1 comparisons, and finally emit a generic
/// compare plus M68kISD::SETCC.
SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  // Lower (trunc (X >> N) to i1) to BT(X, N).
  if (Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue NewSetCC = LowerToBT(Op0, CC, DL, DAG)) {
      // NOTE(review): VT == MVT::i1 appears unreachable given the VT == i8
      // assert above — confirm before relying on this branch.
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == M68kISD::SETCC) {
      M68k::CondCode CCode = (M68k::CondCode)Op0.getConstantOperandVal(0);
      // Invert exactly when the requested comparison contradicts what the
      // inner setcc already produces ((!= ) XOR (compared-to-0)).
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      if (!Invert)
        return Op0;

      CCode = M68k::GetOppositeBranchCondition(CCode);
      SDValue SetCC =
          DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                      DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
      return SetCC;
    }
  }
  if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (isOneConstant(Op1)) {
      // (X ==/!= 1) on i1 is the inverse comparison against 0.
      ISD::CondCode NewCC = ISD::GlobalISel::getSetCCInverse(CC, true);
      return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
    }
    if (!isNullConstant(Op1)) {
      // Non-constant i1 RHS: compare (X ^ Y) against 0 instead.
      SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
      return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
    }
  }

  // Generic path: translate the condition (possibly rewriting Op0/Op1) and
  // emit compare + SETCC. COND_INVALID means the FP condition needs two tests.
  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
  unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
  if (M68kCC == M68k::COND_INVALID)
    return SDValue();

  SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
}
1965
/// Lower an ISD::SETCCCARRY node: compare LHS and RHS together with an
/// incoming borrow/carry, producing an MVT::i8 M68kISD::SETCC result.
SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
  M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());

  // Re-materialize the incoming carry into the flag result: adding all-ones
  // (-1) to the carry value sets the hardware carry flag iff Carry != 0.
  EVT CarryVT = Carry.getValueType();
  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
  Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
                      DAG.getConstant(NegOne, DL, CarryVT));

  // Subtract-with-extend consumes that flag (value #1 of the ADD above).
  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp =
      DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));

  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
}
1989
1990/// Return true if opcode is a M68k logical comparison.
1991static bool isM68kLogicalCmp(SDValue Op) {
1992 unsigned Opc = Op.getNode()->getOpcode();
1993 if (Opc == M68kISD::CMP)
1994 return true;
1995 if (Op.getResNo() == 1 &&
1996 (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX ||
1997 Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL ||
1998 Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND))
1999 return true;
2000
2001 if (Op.getResNo() == 2 && Opc == M68kISD::UMUL)
2002 return true;
2003
2004 return false;
2005}
2006
2007static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
2008 if (V.getOpcode() != ISD::TRUNCATE)
2009 return false;
2010
2011 SDValue VOp0 = V.getOperand(0);
2012 unsigned InBits = VOp0.getValueSizeInBits();
2013 unsigned Bits = V.getValueSizeInBits();
2014 return DAG.MaskedValueIsZero(VOp0,
2015 APInt::getHighBitsSet(InBits, InBits - Bits));
2016}
2017
/// Lower an ISD::SELECT to M68kISD::CMOV, first trying several flag-reuse and
/// sign-bit tricks so the condition materialization can be avoided entirely.
SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  // addTest == true means we still need to emit an explicit TEST to
  // materialize the condition flags before the CMOV.
  bool addTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG))
      Cond = NewCond;
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  if (Cond.getOpcode() == M68kISD::SETCC &&
      Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(0))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode =
        cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
      // Y is whichever arm is not the all-ones constant.
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(1);
      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {

        SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);

        // 0 - x sets carry iff x != 0; SETCC_CARRY then smears it to 0/-1.
        SDValue Neg =
            DAG.getNode(M68kISD::SUB, DL, VTs,
                        DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);

        SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                                  DAG.getConstant(M68k::COND_CS, DL, MVT::i8),
                                  SDValue(Neg.getNode(), 1));
        return Res;
      }

      // General case: cmp 1, x sets carry iff x == 0.
      Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                        DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);

      SDValue Res = // Res = 0 or -1.
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);

      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    bool IllegalFPCMov = false;

    if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BT) {
      Cond = Cmp;
      addTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    // Overflow-producing nodes: lower to the flag-setting target equivalent
    // and select on the appropriate carry/overflow condition.
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned MxOpcode;
    unsigned MxCond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_VS;
      break;
    case ISD::USUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SSUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_VS;
      break;
    case ISD::UMULO:
      MxOpcode = M68kISD::UMUL;
      MxCond = M68k::COND_VS;
      break;
    case ISD::SMULO:
      MxOpcode = M68kISD::SMUL;
      MxCond = M68k::COND_VS;
      break;
    default:
      llvm_unreachable("unexpected overflowing operator");
    }
    // UMUL carries its flag in an extra (third) result.
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = MxOp.getValue(2);
    else
      Cond = MxOp.getValue(1);

    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
  }

  // a < b ? -1 : 0 -> RES = ~setcc_carry
  // a < b ? 0 : -1 -> RES = setcc_carry
  // a >= b ? -1 : 0 -> RES = setcc_carry
  // a >= b ? 0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == M68kISD::SUB) {
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond);
      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // M68k doesn't have an i8 cmov. If both operands are the result of a
  // truncate widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
      Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Blacklist CopyFromReg to avoid partial register stalls.
        T1.getOpcode() != ISD::CopyFromReg &&
        T2.getOpcode() != ISD::CopyFromReg) {
      SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
      SDValue Cmov = DAG.getNode(M68kISD::CMOV, DL, VTs, T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {Op2, Op1, CC, Cond};
  return DAG.getNode(M68kISD::CMOV, DL, VTs, Ops);
}
2217
2218/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
2219/// each of which has no other use apart from the AND / OR.
2220static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
2221 Opc = Op.getOpcode();
2222 if (Opc != ISD::OR && Opc != ISD::AND)
2223 return false;
2224 return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) &&
2225 Op.getOperand(0).hasOneUse() &&
2226 M68k::IsSETCC(Op.getOperand(1).getOpcode()) &&
2227 Op.getOperand(1).hasOneUse());
2228}
2229
2230/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
2231/// SETCC node has a single use.
2232static bool isXor1OfSetCC(SDValue Op) {
2233 if (Op.getOpcode() != ISD::XOR)
2234 return false;
2235 if (isOneConstant(Op.getOperand(1)))
2236 return Op.getOperand(0).getOpcode() == M68kISD::SETCC &&
2237 Op.getOperand(0).hasOneUse();
2238 return false;
2239}
2240
2241SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2242 bool AddTest = true;
2243 SDValue Chain = Op.getOperand(0);
2244 SDValue Cond = Op.getOperand(1);
2245 SDValue Dest = Op.getOperand(2);
2246 SDLoc DL(Op);
2247 SDValue CC;
2248 bool Inverted = false;
2249
2250 if (Cond.getOpcode() == ISD::SETCC) {
2251 // Check for setcc([su]{add,sub}o == 0).
2252 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
2253 isNullConstant(Cond.getOperand(1)) &&
2254 Cond.getOperand(0).getResNo() == 1 &&
2255 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
2256 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
2257 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
2258 Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
2259 Inverted = true;
2260 Cond = Cond.getOperand(0);
2261 } else {
2262 if (SDValue NewCond = LowerSETCC(Cond, DAG))
2263 Cond = NewCond;
2264 }
2265 }
2266
2267 // Look pass (and (setcc_carry (cmp ...)), 1).
2268 if (Cond.getOpcode() == ISD::AND &&
2269 Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
2270 isOneConstant(Cond.getOperand(1)))
2271 Cond = Cond.getOperand(0);
2272
2273 // If condition flag is set by a M68kISD::CMP, then use it as the condition
2274 // setting operand in place of the M68kISD::SETCC.
2275 unsigned CondOpcode = Cond.getOpcode();
2276 if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
2277 CC = Cond.getOperand(0);
2278
2279 SDValue Cmp = Cond.getOperand(1);
2280 unsigned Opc = Cmp.getOpcode();
2281
2282 if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BT) {
2283 Cond = Cmp;
2284 AddTest = false;
2285 } else {
2286 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
2287 default:
2288 break;
2289 case M68k::COND_VS:
2290 case M68k::COND_CS:
2291 // These can only come from an arithmetic instruction with overflow,
2292 // e.g. SADDO, UADDO.
2293 Cond = Cond.getNode()->getOperand(1);
2294 AddTest = false;
2295 break;
2296 }
2297 }
2298 }
2299 CondOpcode = Cond.getOpcode();
2300 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
2301 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO) {
2302 SDValue LHS = Cond.getOperand(0);
2303 SDValue RHS = Cond.getOperand(1);
2304 unsigned MxOpcode;
2305 unsigned MxCond;
2306 SDVTList VTs;
2307 // Keep this in sync with LowerXALUO, otherwise we might create redundant
2308 // instructions that can't be removed afterwards (i.e. M68kISD::ADD and
2309 // M68kISD::INC).
2310 switch (CondOpcode) {
2311 case ISD::UADDO:
2312 MxOpcode = M68kISD::ADD;
2313 MxCond = M68k::COND_CS;
2314 break;
2315 case ISD::SADDO:
2316 MxOpcode = M68kISD::ADD;
2317 MxCond = M68k::COND_VS;
2318 break;
2319 case ISD::USUBO:
2320 MxOpcode = M68kISD::SUB;
2321 MxCond = M68k::COND_CS;
2322 break;
2323 case ISD::SSUBO:
2324 MxOpcode = M68kISD::SUB;
2325 MxCond = M68k::COND_VS;
2326 break;
2327 case ISD::UMULO:
2328 MxOpcode = M68kISD::UMUL;
2329 MxCond = M68k::COND_VS;
2330 break;
2331 case ISD::SMULO:
2332 MxOpcode = M68kISD::SMUL;
2333 MxCond = M68k::COND_VS;
2334 break;
2335 default:
2336 llvm_unreachable("unexpected overflowing operator")::llvm::llvm_unreachable_internal("unexpected overflowing operator"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 2336)
;
2337 }
2338
2339 if (Inverted)
2340 MxCond = M68k::GetOppositeBranchCondition((M68k::CondCode)MxCond);
2341
2342 if (CondOpcode == ISD::UMULO)
2343 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i8);
2344 else
2345 VTs = DAG.getVTList(LHS.getValueType(), MVT::i8);
2346
2347 SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);
2348
2349 if (CondOpcode == ISD::UMULO)
2350 Cond = MxOp.getValue(2);
2351 else
2352 Cond = MxOp.getValue(1);
2353
2354 CC = DAG.getConstant(MxCond, DL, MVT::i8);
2355 AddTest = false;
2356 } else {
2357 unsigned CondOpc;
2358 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
2359 SDValue Cmp = Cond.getOperand(0).getOperand(1);
2360 if (CondOpc == ISD::OR) {
2361 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
2362 // two branches instead of an explicit OR instruction with a
2363 // separate test.
2364 if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
2365 CC = Cond.getOperand(0).getOperand(0);
2366 Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
2367 Dest, CC, Cmp);
2368 CC = Cond.getOperand(1).getOperand(0);
2369 Cond = Cmp;
2370 AddTest = false;
2371 }
2372 } else { // ISD::AND
2373 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
2374 // two branches instead of an explicit AND instruction with a
2375 // separate test. However, we only do this if this block doesn't
2376 // have a fall-through edge, because this requires an explicit
2377 // jmp when the condition is false.
2378 if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
2379 Op.getNode()->hasOneUse()) {
2380 M68k::CondCode CCode =
2381 (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
2382 CCode = M68k::GetOppositeBranchCondition(CCode);
2383 CC = DAG.getConstant(CCode, DL, MVT::i8);
2384 SDNode *User = *Op.getNode()->use_begin();
2385 // Look for an unconditional branch following this conditional branch.
2386 // We need this because we need to reverse the successors in order
2387 // to implement FCMP_OEQ.
2388 if (User->getOpcode() == ISD::BR) {
2389 SDValue FalseBB = User->getOperand(1);
2390 SDNode *NewBR =
2391 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
2392 assert(NewBR == User)((NewBR == User) ? static_cast<void> (0) : __assert_fail
("NewBR == User", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 2392, __PRETTY_FUNCTION__))
;
2393 (void)NewBR;
2394 Dest = FalseBB;
2395
2396 Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
2397 Dest, CC, Cmp);
2398 M68k::CondCode CCode =
2399 (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
2400 CCode = M68k::GetOppositeBranchCondition(CCode);
2401 CC = DAG.getConstant(CCode, DL, MVT::i8);
2402 Cond = Cmp;
2403 AddTest = false;
2404 }
2405 }
2406 }
2407 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
2408 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
2409 // It should be transformed during dag combiner except when the condition
2410 // is set by a arithmetics with overflow node.
2411 M68k::CondCode CCode =
2412 (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
2413 CCode = M68k::GetOppositeBranchCondition(CCode);
2414 CC = DAG.getConstant(CCode, DL, MVT::i8);
2415 Cond = Cond.getOperand(0).getOperand(1);
2416 AddTest = false;
2417 }
2418 }
2419
2420 if (AddTest) {
2421 // Look pass the truncate if the high bits are known zero.
2422 if (isTruncWithZeroHighBitsInput(Cond, DAG))
2423 Cond = Cond.getOperand(0);
2424
2425 // We know the result is compared against zero. Try to match it to BT.
2426 if (Cond.hasOneUse()) {
2427 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
2428 CC = NewSetCC.getOperand(0);
2429 Cond = NewSetCC.getOperand(1);
2430 AddTest = false;
2431 }
2432 }
2433 }
2434
2435 if (AddTest) {
2436 M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
2437 CC = DAG.getConstant(MxCond, DL, MVT::i8);
2438 Cond = EmitTest(Cond, MxCond, DL, DAG);
2439 }
2440 return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
2441 Cond);
2442}
2443
2444SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
2445 SelectionDAG &DAG) const {
2446 MVT VT = Op.getNode()->getSimpleValueType(0);
2447
2448 // Let legalize expand this if it isn't a legal type yet.
2449 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2450 return SDValue();
2451
2452 SDVTList VTs = DAG.getVTList(VT, MVT::i8);
2453
2454 unsigned Opc;
2455 bool ExtraOp = false;
2456 switch (Op.getOpcode()) {
2457 default:
2458 llvm_unreachable("Invalid code")::llvm::llvm_unreachable_internal("Invalid code", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 2458)
;
2459 case ISD::ADDC:
2460 Opc = M68kISD::ADD;
2461 break;
2462 case ISD::ADDE:
2463 Opc = M68kISD::ADDX;
2464 ExtraOp = true;
2465 break;
2466 case ISD::SUBC:
2467 Opc = M68kISD::SUB;
2468 break;
2469 case ISD::SUBE:
2470 Opc = M68kISD::SUBX;
2471 ExtraOp = true;
2472 break;
2473 }
2474
2475 if (!ExtraOp)
2476 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2477 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2478 Op.getOperand(2));
2479}
2480
2481// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
2482// their target countpart wrapped in the M68kISD::Wrapper node. Suppose N is
2483// one of the above mentioned nodes. It has to be wrapped because otherwise
2484// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2485// be used to form addressing mode. These wrapped nodes will be selected
2486// into MOV32ri.
2487SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
2488 SelectionDAG &DAG) const {
2489 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2490
2491 // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
2492 // global base reg.
2493 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
2494
2495 unsigned WrapperKind = M68kISD::Wrapper;
2496 if (M68kII::isPCRelGlobalReference(OpFlag)) {
2497 WrapperKind = M68kISD::WrapperPC;
2498 }
2499
2500 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2501 SDValue Result = DAG.getTargetConstantPool(
2502 CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
2503
2504 SDLoc DL(CP);
2505 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
2506
2507 // With PIC, the address is actually $g + Offset.
2508 if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
2509 Result = DAG.getNode(ISD::ADD, DL, PtrVT,
2510 DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
2511 Result);
2512 }
2513
2514 return Result;
2515}
2516
2517SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
2518 SelectionDAG &DAG) const {
2519 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
2520
2521 // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
2522 // global base reg.
2523 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
2524 unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);
2525
2526 unsigned WrapperKind = M68kISD::Wrapper;
2527 if (M68kII::isPCRelGlobalReference(OpFlag)) {
2528 WrapperKind = M68kISD::WrapperPC;
2529 }
2530
2531 auto PtrVT = getPointerTy(DAG.getDataLayout());
2532 SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
2533
2534 SDLoc DL(Op);
2535 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
2536
2537 // With PIC, the address is actually $g + Offset.
2538 if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
2539 Result = DAG.getNode(ISD::ADD, DL, PtrVT,
2540 DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
2541 Result);
2542 }
2543
2544 // For symbols that require a load from a stub to get the address, emit the
2545 // load.
2546 if (M68kII::isGlobalStubReference(OpFlag)) {
2547 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2548 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2549 }
2550
2551 return Result;
2552}
2553
2554SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
2555 SelectionDAG &DAG) const {
2556 unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
2557 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2558 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
2559 SDLoc DL(Op);
2560 auto PtrVT = getPointerTy(DAG.getDataLayout());
2561
2562 // Create the TargetBlockAddressAddress node.
2563 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
2564
2565 if (M68kII::isPCRelBlockReference(OpFlags)) {
2566 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2567 } else {
2568 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2569 }
2570
2571 // With PIC, the address is actually $g + Offset.
2572 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2573 Result =
2574 DAG.getNode(ISD::ADD, DL, PtrVT,
2575 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2576 }
2577
2578 return Result;
2579}
2580
2581SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
2582 const SDLoc &DL, int64_t Offset,
2583 SelectionDAG &DAG) const {
2584 unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
2585 auto PtrVT = getPointerTy(DAG.getDataLayout());
2586
2587 // Create the TargetGlobalAddress node, folding in the constant
2588 // offset if it is legal.
2589 SDValue Result;
2590 if (M68kII::isDirectGlobalReference(OpFlags)) {
2591 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
2592 Offset = 0;
2593 } else {
2594 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2595 }
2596
2597 if (M68kII::isPCRelGlobalReference(OpFlags))
2598 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2599 else
2600 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2601
2602 // With PIC, the address is actually $g + Offset.
2603 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2604 Result =
2605 DAG.getNode(ISD::ADD, DL, PtrVT,
2606 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2607 }
2608
2609 // For globals that require a load from a stub to get the address, emit the
2610 // load.
2611 if (M68kII::isGlobalStubReference(OpFlags)) {
2612 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2613 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2614 }
2615
2616 // If there was a non-zero offset that we didn't fold, create an explicit
2617 // addition for it.
2618 if (Offset != 0) {
2619 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
2620 DAG.getConstant(Offset, DL, PtrVT));
2621 }
2622
2623 return Result;
2624}
2625
2626SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
2627 SelectionDAG &DAG) const {
2628 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2629 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
2630 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
2631}
2632
2633//===----------------------------------------------------------------------===//
2634// Custom Lower Jump Table
2635//===----------------------------------------------------------------------===//
2636
2637SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
2638 SelectionDAG &DAG) const {
2639 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2640
2641 // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
2642 // global base reg.
2643 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
2644
2645 unsigned WrapperKind = M68kISD::Wrapper;
2646 if (M68kII::isPCRelGlobalReference(OpFlag)) {
2647 WrapperKind = M68kISD::WrapperPC;
2648 }
2649
2650 auto PtrVT = getPointerTy(DAG.getDataLayout());
2651 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
2652 SDLoc DL(JT);
2653 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
2654
2655 // With PIC, the address is actually $g + Offset.
2656 if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
2657 Result = DAG.getNode(ISD::ADD, DL, PtrVT,
2658 DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
2659 Result);
2660 }
2661
2662 return Result;
2663}
2664
2665unsigned M68kTargetLowering::getJumpTableEncoding() const {
2666 return Subtarget.getJumpTableEncoding();
2667}
2668
2669const MCExpr *M68kTargetLowering::LowerCustomJumpTableEntry(
2670 const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
2671 unsigned uid, MCContext &Ctx) const {
2672 return MCSymbolRefExpr::create(MBB->getSymbol(), MCSymbolRefExpr::VK_GOTOFF,
2673 Ctx);
2674}
2675
2676SDValue M68kTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2677 SelectionDAG &DAG) const {
2678 if (getJumpTableEncoding() == MachineJumpTableInfo::EK_Custom32)
2679 return DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(),
2680 getPointerTy(DAG.getDataLayout()));
2681
2682 // MachineJumpTableInfo::EK_LabelDifference32 entry
2683 return Table;
2684}
2685
2686// NOTE This only used for MachineJumpTableInfo::EK_LabelDifference32 entries
2687const MCExpr *M68kTargetLowering::getPICJumpTableRelocBaseExpr(
2688 const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
2689 return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
2690}
2691
2692/// Determines whether the callee is required to pop its own arguments.
2693/// Callee pop is necessary to support tail calls.
2694bool M68k::isCalleePop(CallingConv::ID CallingConv, bool IsVarArg,
2695 bool GuaranteeTCO) {
2696 return false;
2697}
2698
2699// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
2700// together with other CMOV pseudo-opcodes into a single basic-block with
2701// conditional jump around it.
2702static bool isCMOVPseudo(MachineInstr &MI) {
2703 switch (MI.getOpcode()) {
2704 case M68k::CMOV8d:
2705 case M68k::CMOV16d:
2706 case M68k::CMOV32r:
2707 return true;
2708
2709 default:
2710 return false;
2711 }
2712}
2713
2714// The CCR operand of SelectItr might be missing a kill marker
2715// because there were multiple uses of CCR, and ISel didn't know
2716// which to mark. Figure out whether SelectItr should have had a
2717// kill marker, and set it if it should. Returns the correct kill
2718// marker value.
2719static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr,
2720 MachineBasicBlock *BB,
2721 const TargetRegisterInfo *TRI) {
2722 // Scan forward through BB for a use/def of CCR.
2723 MachineBasicBlock::iterator miI(std::next(SelectItr));
2724 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
2725 const MachineInstr &mi = *miI;
2726 if (mi.readsRegister(M68k::CCR))
2727 return false;
2728 if (mi.definesRegister(M68k::CCR))
2729 break; // Should have kill-flag - update below.
2730 }
2731
2732 // If we hit the end of the block, check whether CCR is live into a
2733 // successor.
2734 if (miI == BB->end())
2735 for (const auto *SBB : BB->successors())
2736 if (SBB->isLiveIn(M68k::CCR))
2737 return false;
2738
2739 // We found a def, or hit the end of the basic block and CCR wasn't live
2740 // out. SelectMI should have a kill flag on CCR.
2741 SelectItr->addRegisterKilled(M68k::CCR, TRI);
2742 return true;
2743}
2744
2745MachineBasicBlock *
2746M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
2747 MachineBasicBlock *MBB) const {
2748 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
2749 DebugLoc DL = MI.getDebugLoc();
2750
2751 // To "insert" a SELECT_CC instruction, we actually have to insert the
2752 // diamond control-flow pattern. The incoming instruction knows the
2753 // destination vreg to set, the condition code register to branch on, the
2754 // true/false values to select between, and a branch opcode to use.
2755 const BasicBlock *BB = MBB->getBasicBlock();
2756 MachineFunction::iterator It = ++MBB->getIterator();
2757
2758 // ThisMBB:
2759 // ...
2760 // TrueVal = ...
2761 // cmp ccX, r1, r2
2762 // bcc Copy1MBB
2763 // fallthrough --> Copy0MBB
2764 MachineBasicBlock *ThisMBB = MBB;
2765 MachineFunction *F = MBB->getParent();
2766
2767 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
2768 // as described above, by inserting a MBB, and then making a PHI at the join
2769 // point to select the true and false operands of the CMOV in the PHI.
2770 //
2771 // The code also handles two different cases of multiple CMOV opcodes
2772 // in a row.
2773 //
2774 // Case 1:
2775 // In this case, there are multiple CMOVs in a row, all which are based on
2776 // the same condition setting (or the exact opposite condition setting).
2777 // In this case we can lower all the CMOVs using a single inserted MBB, and
2778 // then make a number of PHIs at the join point to model the CMOVs. The only
2779 // trickiness here, is that in a case like:
2780 //
2781 // t2 = CMOV cond1 t1, f1
2782 // t3 = CMOV cond1 t2, f2
2783 //
2784 // when rewriting this into PHIs, we have to perform some renaming on the
2785 // temps since you cannot have a PHI operand refer to a PHI result earlier
2786 // in the same block. The "simple" but wrong lowering would be:
2787 //
2788 // t2 = PHI t1(BB1), f1(BB2)
2789 // t3 = PHI t2(BB1), f2(BB2)
2790 //
2791 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
2792 // renaming is to note that on the path through BB1, t2 is really just a
2793 // copy of t1, and do that renaming, properly generating:
2794 //
2795 // t2 = PHI t1(BB1), f1(BB2)
2796 // t3 = PHI t1(BB1), f2(BB2)
2797 //
2798 // Case 2, we lower cascaded CMOVs such as
2799 //
2800 // (CMOV (CMOV F, T, cc1), T, cc2)
2801 //
2802 // to two successives branches.
2803 MachineInstr *CascadedCMOV = nullptr;
2804 MachineInstr *LastCMOV = &MI;
2805 M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
2806 M68k::CondCode OppCC = M68k::GetOppositeBranchCondition(CC);
2807 MachineBasicBlock::iterator NextMIIt =
2808 std::next(MachineBasicBlock::iterator(MI));
2809
2810 // Check for case 1, where there are multiple CMOVs with the same condition
2811 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
2812 // number of jumps the most.
2813
2814 if (isCMOVPseudo(MI)) {
3
Taking false branch
2815 // See if we have a string of CMOVS with the same condition.
2816 while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
2817 (NextMIIt->getOperand(3).getImm() == CC ||
2818 NextMIIt->getOperand(3).getImm() == OppCC)) {
2819 LastCMOV = &*NextMIIt;
2820 ++NextMIIt;
2821 }
2822 }
2823
2824 // This checks for case 2, but only do this if we didn't already find
2825 // case 1, as indicated by LastCMOV == MI.
2826 if (LastCMOV == &MI && NextMIIt != MBB->end() &&
5
Assuming the condition is true
6
Taking true branch
2827 NextMIIt->getOpcode() == MI.getOpcode() &&
4
Assuming the condition is true
2828 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
2829 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
2830 NextMIIt->getOperand(1).isKill()) {
2831 CascadedCMOV = &*NextMIIt;
2832 }
2833
2834 MachineBasicBlock *Jcc1MBB = nullptr;
2835
2836 // If we have a cascaded CMOV, we lower it to two successive branches to
2837 // the same block. CCR is used by both, so mark it as live in the second.
2838 if (CascadedCMOV
6.1
'CascadedCMOV' is non-null
6.1
'CascadedCMOV' is non-null
) {
7
Taking true branch
2839 Jcc1MBB = F->CreateMachineBasicBlock(BB);
2840 F->insert(It, Jcc1MBB);
2841 Jcc1MBB->addLiveIn(M68k::CCR);
2842 }
2843
2844 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
2845 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
2846 F->insert(It, Copy0MBB);
2847 F->insert(It, SinkMBB);
2848
2849 // If the CCR register isn't dead in the terminator, then claim that it's
2850 // live into the sink and copy blocks.
2851 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2852
2853 MachineInstr *LastCCRSUser = CascadedCMOV
7.1
'CascadedCMOV' is non-null
7.1
'CascadedCMOV' is non-null
? CascadedCMOV : LastCMOV;
8
'?' condition is true
2854 if (!LastCCRSUser->killsRegister(M68k::CCR) &&
9
Taking false branch
2855 !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
2856 Copy0MBB->addLiveIn(M68k::CCR);
2857 SinkMBB->addLiveIn(M68k::CCR);
2858 }
2859
2860 // Transfer the remainder of MBB and its successor edges to SinkMBB.
2861 SinkMBB->splice(SinkMBB->begin(), MBB,
2862 std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
2863 SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
2864
2865 // Add the true and fallthrough blocks as its successors.
2866 if (CascadedCMOV
9.1
'CascadedCMOV' is non-null
9.1
'CascadedCMOV' is non-null
) {
10
Taking true branch
2867 // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
2868 MBB->addSuccessor(Jcc1MBB);
2869
2870 // In that case, Jcc1MBB will itself fallthrough the Copy0MBB, and
2871 // jump to the SinkMBB.
2872 Jcc1MBB->addSuccessor(Copy0MBB);
2873 Jcc1MBB->addSuccessor(SinkMBB);
2874 } else {
2875 MBB->addSuccessor(Copy0MBB);
2876 }
2877
2878 // The true block target of the first (or only) branch is always SinkMBB.
2879 MBB->addSuccessor(SinkMBB);
2880
2881 // Create the conditional branch instruction.
2882 unsigned Opc = M68k::GetCondBranchFromCond(CC);
2883 BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);
2884
2885 if (CascadedCMOV
10.1
'CascadedCMOV' is non-null
10.1
'CascadedCMOV' is non-null
) {
11
Taking true branch
2886 unsigned Opc2 = M68k::GetCondBranchFromCond(
2887 (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
2888 BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
2889 }
2890
2891 // Copy0MBB:
2892 // %FalseValue = ...
2893 // # fallthrough to SinkMBB
2894 Copy0MBB->addSuccessor(SinkMBB);
2895
2896 // SinkMBB:
2897 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
2898 // ...
2899 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
2900 MachineBasicBlock::iterator MIItEnd =
2901 std::next(MachineBasicBlock::iterator(LastCMOV));
2902 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
2903 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
2904 MachineInstrBuilder MIB;
12
Calling defaulted default constructor for 'MachineInstrBuilder'
14
Returning from default constructor for 'MachineInstrBuilder'
2905
2906 // As we are creating the PHIs, we have to be careful if there is more than
2907 // one. Later CMOVs may reference the results of earlier CMOVs, but later
2908 // PHIs have to reference the individual true/false inputs from earlier PHIs.
2909 // That also means that PHI construction must work forward from earlier to
2910 // later, and that the code must maintain a mapping from earlier PHI's
2911 // destination registers, and the registers that went into the PHI.
2912
2913 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
15
Loop condition is false. Execution continues on line 2943
2914 unsigned DestReg = MIIt->getOperand(0).getReg();
2915 unsigned Op1Reg = MIIt->getOperand(1).getReg();
2916 unsigned Op2Reg = MIIt->getOperand(2).getReg();
2917
2918 // If this CMOV we are generating is the opposite condition from
2919 // the jump we generated, then we have to swap the operands for the
2920 // PHI that is going to be generated.
2921 if (MIIt->getOperand(3).getImm() == OppCC)
2922 std::swap(Op1Reg, Op2Reg);
2923
2924 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
2925 Op1Reg = RegRewriteTable[Op1Reg].first;
2926
2927 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
2928 Op2Reg = RegRewriteTable[Op2Reg].second;
2929
2930 MIB =
2931 BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
2932 .addReg(Op1Reg)
2933 .addMBB(Copy0MBB)
2934 .addReg(Op2Reg)
2935 .addMBB(ThisMBB);
2936
2937 // Add this PHI to the rewrite table.
2938 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
2939 }
2940
2941 // If we have a cascaded CMOV, the second Jcc provides the same incoming
2942 // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
2943 if (CascadedCMOV
15.1
'CascadedCMOV' is non-null
15.1
'CascadedCMOV' is non-null
) {
16
Taking true branch
2944 MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
17
Calling 'MachineInstrBuilder::addReg'
2945 // Copy the PHI result to the register defined by the second CMOV.
2946 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
2947 DL, TII->get(TargetOpcode::COPY),
2948 CascadedCMOV->getOperand(0).getReg())
2949 .addReg(MI.getOperand(0).getReg());
2950 CascadedCMOV->eraseFromParent();
2951 }
2952
2953 // Now remove the CMOV(s).
2954 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
2955 (MIIt++)->eraseFromParent();
2956
2957 return SinkMBB;
2958}
2959
2960MachineBasicBlock *
2961M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
2962 MachineBasicBlock *BB) const {
2963 llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on")::llvm::llvm_unreachable_internal("Cannot lower Segmented Stack Alloca with stack-split on"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 2963)
;
2964}
2965
2966MachineBasicBlock *
2967M68kTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2968 MachineBasicBlock *BB) const {
2969 switch (MI.getOpcode()) {
1
Control jumps to 'case CMOV8d:' at line 2972
2970 default:
2971 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 2971)
;
2972 case M68k::CMOV8d:
2973 case M68k::CMOV16d:
2974 case M68k::CMOV32r:
2975 return EmitLoweredSelect(MI, BB);
2
Calling 'M68kTargetLowering::EmitLoweredSelect'
2976 case M68k::SALLOCA:
2977 return EmitLoweredSegAlloca(MI, BB);
2978 }
2979}
2980
2981SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2982 MachineFunction &MF = DAG.getMachineFunction();
2983 auto PtrVT = getPointerTy(MF.getDataLayout());
2984 M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
2985
2986 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2987 SDLoc DL(Op);
2988
2989 // vastart just stores the address of the VarArgsFrameIndex slot into the
2990 // memory location argument.
2991 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2992 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
2993 MachinePointerInfo(SV));
2994}
2995
2996// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
2997// Calls to _alloca are needed to probe the stack when allocating more than 4k
2998// bytes in one go. Touching the stack at 4K increments is necessary to ensure
2999// that the guard pages used by the OS virtual memory manager are allocated in
3000// correct sequence.
3001SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3002 SelectionDAG &DAG) const {
3003 MachineFunction &MF = DAG.getMachineFunction();
3004 bool SplitStack = MF.shouldSplitStack();
3005
3006 SDLoc DL(Op);
3007
3008 // Get the inputs.
3009 SDNode *Node = Op.getNode();
3010 SDValue Chain = Op.getOperand(0);
3011 SDValue Size = Op.getOperand(1);
3012 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3013 EVT VT = Node->getValueType(0);
3014
3015 // Chain the dynamic stack allocation so that it doesn't modify the stack
3016 // pointer when other instructions are using the stack.
3017 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
3018
3019 SDValue Result;
3020 if (SplitStack) {
3021 auto &MRI = MF.getRegInfo();
3022 auto SPTy = getPointerTy(DAG.getDataLayout());
3023 auto *ARClass = getRegClassFor(SPTy);
3024 unsigned Vreg = MRI.createVirtualRegister(ARClass);
3025 Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
3026 Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
3027 DAG.getRegister(Vreg, SPTy));
3028 } else {
3029 auto &TLI = DAG.getTargetLoweringInfo();
3030 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
3031 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"((SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!") ? static_cast
<void> (0) : __assert_fail ("SPReg && \"Target cannot require DYNAMIC_STACKALLOC expansion and\" \" not tell us which reg is the stack pointer!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 3032, __PRETTY_FUNCTION__))
3032 " not tell us which reg is the stack pointer!")((SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!") ? static_cast
<void> (0) : __assert_fail ("SPReg && \"Target cannot require DYNAMIC_STACKALLOC expansion and\" \" not tell us which reg is the stack pointer!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/M68k/M68kISelLowering.cpp"
, 3032, __PRETTY_FUNCTION__))
;
3033
3034 SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
3035 Chain = SP.getValue(1);
3036 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3037 unsigned StackAlign = TFI.getStackAlignment();
3038 Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
3039 if (Align > StackAlign)
3040 Result = DAG.getNode(ISD::AND, DL, VT, Result,
3041 DAG.getConstant(-(uint64_t)Align, DL, VT));
3042 Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
3043 }
3044
3045 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
3046 DAG.getIntPtrConstant(0, DL, true), SDValue(), DL);
3047
3048 SDValue Ops[2] = {Result, Chain};
3049 return DAG.getMergeValues(Ops, DL);
3050}
3051
3052//===----------------------------------------------------------------------===//
3053// DAG Combine
3054//===----------------------------------------------------------------------===//
3055
3056static SDValue getSETCC(M68k::CondCode Cond, SDValue CCR, const SDLoc &dl,
3057 SelectionDAG &DAG) {
3058 return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
3059 DAG.getConstant(Cond, dl, MVT::i8), CCR);
3060}
3061// When legalizing carry, we create carries via add X, -1
3062// If that comes from an actual carry, via setcc, we use the
3063// carry directly.
3064static SDValue combineCarryThroughADD(SDValue CCR) {
3065 if (CCR.getOpcode() == M68kISD::ADD) {
3066 if (isAllOnesConstant(CCR.getOperand(1))) {
3067 SDValue Carry = CCR.getOperand(0);
3068 while (Carry.getOpcode() == ISD::TRUNCATE ||
3069 Carry.getOpcode() == ISD::ZERO_EXTEND ||
3070 Carry.getOpcode() == ISD::SIGN_EXTEND ||
3071 Carry.getOpcode() == ISD::ANY_EXTEND ||
3072 (Carry.getOpcode() == ISD::AND &&
3073 isOneConstant(Carry.getOperand(1))))
3074 Carry = Carry.getOperand(0);
3075 if (Carry.getOpcode() == M68kISD::SETCC ||
3076 Carry.getOpcode() == M68kISD::SETCC_CARRY) {
3077 if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
3078 return Carry.getOperand(1);
3079 }
3080 }
3081 }
3082
3083 return SDValue();
3084}
3085
3086/// Optimize a CCR definition used according to the condition code \p CC into
3087/// a simpler CCR value, potentially returning a new \p CC and replacing uses
3088/// of chain values.
3089static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC,
3090 SelectionDAG &DAG,
3091 const M68kSubtarget &Subtarget) {
3092 if (CC == M68k::COND_CS)
3093 if (SDValue Flags = combineCarryThroughADD(CCR))
3094 return Flags;
3095
3096 return SDValue();
3097}
3098
3099// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
3100static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG,
3101 const M68kSubtarget &Subtarget) {
3102 SDLoc DL(N);
3103 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
3104 SDValue CCR = N->getOperand(1);
3105
3106 // Try to simplify the CCR and condition code operands.
3107 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
3108 return getSETCC(CC, Flags, DL, DAG);
3109
3110 return SDValue();
3111}
3112static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG,
3113 const M68kSubtarget &Subtarget) {
3114 SDLoc DL(N);
3115 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
3116 SDValue CCR = N->getOperand(3);
3117
3118 // Try to simplify the CCR and condition code operands.
3119 // Make sure to not keep references to operands, as combineSetCCCCR can
3120 // RAUW them under us.
3121 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
3122 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
3123 return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
3124 N->getOperand(1), Cond, Flags);
3125 }
3126
3127 return SDValue();
3128}
3129
3130static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG) {
3131 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3132 MVT VT = N->getSimpleValueType(0);
3133 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3134 return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
3135 N->getOperand(1), Flags);
3136 }
3137
3138 return SDValue();
3139}
3140
3141// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
3142static SDValue combineADDX(SDNode *N, SelectionDAG &DAG,
3143 TargetLowering::DAGCombinerInfo &DCI) {
3144 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3145 MVT VT = N->getSimpleValueType(0);
3146 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3147 return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
3148 N->getOperand(1), Flags);
3149 }
3150
3151 return SDValue();
3152}
3153
3154SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
3155 DAGCombinerInfo &DCI) const {
3156 SelectionDAG &DAG = DCI.DAG;
3157 switch (N->getOpcode()) {
3158 case M68kISD::SUBX:
3159 return combineSUBX(N, DAG);
3160 case M68kISD::ADDX:
3161 return combineADDX(N, DAG, DCI);
3162 case M68kISD::SETCC:
3163 return combineM68kSetCC(N, DAG, Subtarget);
3164 case M68kISD::BRCOND:
3165 return combineM68kBrCond(N, DAG, Subtarget);
3166 }
3167
3168 return SDValue();
3169}
3170
3171//===----------------------------------------------------------------------===//
3172// M68kISD Node Names
3173//===----------------------------------------------------------------------===//
3174const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
3175 switch (Opcode) {
3176 case M68kISD::CALL:
3177 return "M68kISD::CALL";
3178 case M68kISD::TAIL_CALL:
3179 return "M68kISD::TAIL_CALL";
3180 case M68kISD::RET:
3181 return "M68kISD::RET";
3182 case M68kISD::TC_RETURN:
3183 return "M68kISD::TC_RETURN";
3184 case M68kISD::ADD:
3185 return "M68kISD::ADD";
3186 case M68kISD::SUB:
3187 return "M68kISD::SUB";
3188 case M68kISD::ADDX:
3189 return "M68kISD::ADDX";
3190 case M68kISD::SUBX:
3191 return "M68kISD::SUBX";
3192 case M68kISD::SMUL:
3193 return "M68kISD::SMUL";
3194 case M68kISD::UMUL:
3195 return "M68kISD::UMUL";
3196 case M68kISD::OR:
3197 return "M68kISD::OR";
3198 case M68kISD::XOR:
3199 return "M68kISD::XOR";
3200 case M68kISD::AND:
3201 return "M68kISD::AND";
3202 case M68kISD::CMP:
3203 return "M68kISD::CMP";
3204 case M68kISD::BT:
3205 return "M68kISD::BT";
3206 case M68kISD::SELECT:
3207 return "M68kISD::SELECT";
3208 case M68kISD::CMOV:
3209 return "M68kISD::CMOV";
3210 case M68kISD::BRCOND:
3211 return "M68kISD::BRCOND";
3212 case M68kISD::SETCC:
3213 return "M68kISD::SETCC";
3214 case M68kISD::SETCC_CARRY:
3215 return "M68kISD::SETCC_CARRY";
3216 case M68kISD::GLOBAL_BASE_REG:
3217 return "M68kISD::GLOBAL_BASE_REG";
3218 case M68kISD::Wrapper:
3219 return "M68kISD::Wrapper";
3220 case M68kISD::WrapperPC:
3221 return "M68kISD::WrapperPC";
3222 case M68kISD::SEG_ALLOCA:
3223 return "M68kISD::SEG_ALLOCA";
3224 default:
3225 return NULL__null;
3226 }
3227}

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h

1//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes a function named BuildMI, which is useful for dramatically
10// simplifying how MachineInstr's are created. It allows use of code like this:
11//
12// M = BuildMI(MBB, MI, DL, TII.get(X86::ADD8rr), Dst)
13// .addReg(argVal1)
14// .addReg(argVal2);
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
19#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
20
21#include "llvm/ADT/ArrayRef.h"
22#include "llvm/CodeGen/GlobalISel/Utils.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
24#include "llvm/CodeGen/MachineFunction.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBundle.h"
27#include "llvm/CodeGen/MachineOperand.h"
28#include "llvm/CodeGen/TargetRegisterInfo.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Intrinsics.h"
31#include "llvm/Support/ErrorHandling.h"
32#include <cassert>
33#include <cstdint>
34#include <utility>
35
36namespace llvm {
37
38class MCInstrDesc;
39class MDNode;
40
namespace RegState {

/// Flags describing how a register operand is used; values are bit flags and
/// may be OR'd together (bit 0x1 is deliberately unused so that a stray
/// boolean 'true' passed to addReg is caught by its assertion).
enum {
  /// Register definition.
  Define = 0x2,
  /// Not emitted register (e.g. carry, or temporary result).
  Implicit = 0x4,
  /// The last use of a register.
  Kill = 0x8,
  /// Unused definition.
  Dead = 0x10,
  /// Value of the register doesn't matter.
  Undef = 0x20,
  /// Register definition happens before uses.
  EarlyClobber = 0x40,
  /// Register 'use' is for debugging purpose.
  Debug = 0x80,
  /// Register reads a value that is defined inside the same instruction or
  /// bundle.
  InternalRead = 0x100,
  /// Register that may be renamed.
  Renamable = 0x200,
  DefineNoRead = Define | Undef,
  ImplicitDefine = Implicit | Define,
  ImplicitKill = Implicit | Kill
};

} // end namespace RegState
69
70class MachineInstrBuilder {
71 MachineFunction *MF = nullptr;
72 MachineInstr *MI = nullptr;
13
Null pointer value stored to 'MIB.MI'
73
74public:
75 MachineInstrBuilder() = default;
76
77 /// Create a MachineInstrBuilder for manipulating an existing instruction.
78 /// F must be the machine function that was used to allocate I.
79 MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
80 MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
81 : MF(&F), MI(&*I) {}
82
83 /// Allow automatic conversion to the machine instruction we are working on.
84 operator MachineInstr*() const { return MI; }
85 MachineInstr *operator->() const { return MI; }
86 operator MachineBasicBlock::iterator() const { return MI; }
87
88 /// If conversion operators fail, use this method to get the MachineInstr
89 /// explicitly.
90 MachineInstr *getInstr() const { return MI; }
91
92 /// Get the register for the operand index.
93 /// The operand at the index should be a register (asserted by
94 /// MachineOperand).
95 Register getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }
96
97 /// Add a new virtual register operand.
98 const MachineInstrBuilder &addReg(Register RegNo, unsigned flags = 0,
99 unsigned SubReg = 0) const {
100 assert((flags & 0x1) == 0 &&(((flags & 0x1) == 0 && "Passing in 'true' to addReg is forbidden! Use enums instead."
) ? static_cast<void> (0) : __assert_fail ("(flags & 0x1) == 0 && \"Passing in 'true' to addReg is forbidden! Use enums instead.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 101, __PRETTY_FUNCTION__))
18
'?' condition is true
101 "Passing in 'true' to addReg is forbidden! Use enums instead.")(((flags & 0x1) == 0 && "Passing in 'true' to addReg is forbidden! Use enums instead."
) ? static_cast<void> (0) : __assert_fail ("(flags & 0x1) == 0 && \"Passing in 'true' to addReg is forbidden! Use enums instead.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 101, __PRETTY_FUNCTION__))
;
102 MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
19
Called C++ object pointer is null
103 flags & RegState::Define,
104 flags & RegState::Implicit,
105 flags & RegState::Kill,
106 flags & RegState::Dead,
107 flags & RegState::Undef,
108 flags & RegState::EarlyClobber,
109 SubReg,
110 flags & RegState::Debug,
111 flags & RegState::InternalRead,
112 flags & RegState::Renamable));
113 return *this;
114 }
115
116 /// Add a virtual register definition operand.
117 const MachineInstrBuilder &addDef(Register RegNo, unsigned Flags = 0,
118 unsigned SubReg = 0) const {
119 return addReg(RegNo, Flags | RegState::Define, SubReg);
120 }
121
122 /// Add a virtual register use operand. It is an error for Flags to contain
123 /// `RegState::Define` when calling this function.
124 const MachineInstrBuilder &addUse(Register RegNo, unsigned Flags = 0,
125 unsigned SubReg = 0) const {
126 assert(!(Flags & RegState::Define) &&((!(Flags & RegState::Define) && "Misleading addUse defines register, use addReg instead."
) ? static_cast<void> (0) : __assert_fail ("!(Flags & RegState::Define) && \"Misleading addUse defines register, use addReg instead.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 127, __PRETTY_FUNCTION__))
127 "Misleading addUse defines register, use addReg instead.")((!(Flags & RegState::Define) && "Misleading addUse defines register, use addReg instead."
) ? static_cast<void> (0) : __assert_fail ("!(Flags & RegState::Define) && \"Misleading addUse defines register, use addReg instead.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 127, __PRETTY_FUNCTION__))
;
128 return addReg(RegNo, Flags, SubReg);
129 }
130
131 /// Add a new immediate operand.
132 const MachineInstrBuilder &addImm(int64_t Val) const {
133 MI->addOperand(*MF, MachineOperand::CreateImm(Val));
134 return *this;
135 }
136
137 const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
138 MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
139 return *this;
140 }
141
142 const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
143 MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
144 return *this;
145 }
146
147 const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
148 unsigned TargetFlags = 0) const {
149 MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
150 return *this;
151 }
152
153 const MachineInstrBuilder &addFrameIndex(int Idx) const {
154 MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
155 return *this;
156 }
157
158 const MachineInstrBuilder &
159 addConstantPoolIndex(unsigned Idx, int Offset = 0,
160 unsigned TargetFlags = 0) const {
161 MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
162 return *this;
163 }
164
165 const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
166 unsigned TargetFlags = 0) const {
167 MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
168 TargetFlags));
169 return *this;
170 }
171
172 const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
173 unsigned TargetFlags = 0) const {
174 MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
175 return *this;
176 }
177
178 const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
179 int64_t Offset = 0,
180 unsigned TargetFlags = 0) const {
181 MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
182 return *this;
183 }
184
185 const MachineInstrBuilder &addExternalSymbol(const char *FnName,
186 unsigned TargetFlags = 0) const {
187 MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
188 return *this;
189 }
190
191 const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
192 int64_t Offset = 0,
193 unsigned TargetFlags = 0) const {
194 MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
195 return *this;
196 }
197
198 const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
199 MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
200 return *this;
201 }
202
203 const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
204 MI->addMemOperand(*MF, MMO);
205 return *this;
206 }
207
208 const MachineInstrBuilder &
209 setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
210 MI->setMemRefs(*MF, MMOs);
211 return *this;
212 }
213
214 const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
215 MI->cloneMemRefs(*MF, OtherMI);
216 return *this;
217 }
218
219 const MachineInstrBuilder &
220 cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
221 MI->cloneMergedMemRefs(*MF, OtherMIs);
222 return *this;
223 }
224
225 const MachineInstrBuilder &add(const MachineOperand &MO) const {
226 MI->addOperand(*MF, MO);
227 return *this;
228 }
229
230 const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
231 for (const MachineOperand &MO : MOs) {
232 MI->addOperand(*MF, MO);
233 }
234 return *this;
235 }
236
237 const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
238 MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
239 assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())(((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable
()) : true) && "first MDNode argument of a DBG_VALUE not a variable"
) ? static_cast<void> (0) : __assert_fail ("(MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable()) : true) && \"first MDNode argument of a DBG_VALUE not a variable\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 241, __PRETTY_FUNCTION__))
240 : true) &&(((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable
()) : true) && "first MDNode argument of a DBG_VALUE not a variable"
) ? static_cast<void> (0) : __assert_fail ("(MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable()) : true) && \"first MDNode argument of a DBG_VALUE not a variable\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 241, __PRETTY_FUNCTION__))
241 "first MDNode argument of a DBG_VALUE not a variable")(((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable
()) : true) && "first MDNode argument of a DBG_VALUE not a variable"
) ? static_cast<void> (0) : __assert_fail ("(MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable()) : true) && \"first MDNode argument of a DBG_VALUE not a variable\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 241, __PRETTY_FUNCTION__))
;
242 assert((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel())(((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel
()) : true) && "first MDNode argument of a DBG_LABEL not a label"
) ? static_cast<void> (0) : __assert_fail ("(MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel()) : true) && \"first MDNode argument of a DBG_LABEL not a label\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 244, __PRETTY_FUNCTION__))
243 : true) &&(((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel
()) : true) && "first MDNode argument of a DBG_LABEL not a label"
) ? static_cast<void> (0) : __assert_fail ("(MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel()) : true) && \"first MDNode argument of a DBG_LABEL not a label\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 244, __PRETTY_FUNCTION__))
244 "first MDNode argument of a DBG_LABEL not a label")(((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel
()) : true) && "first MDNode argument of a DBG_LABEL not a label"
) ? static_cast<void> (0) : __assert_fail ("(MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel()) : true) && \"first MDNode argument of a DBG_LABEL not a label\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 244, __PRETTY_FUNCTION__))
;
245 return *this;
246 }
247
248 const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
249 MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
250 return *this;
251 }
252
253 const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
254 MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
255 return *this;
256 }
257
258 const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
259 MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
260 return *this;
261 }
262
263 const MachineInstrBuilder &addShuffleMask(ArrayRef<int> Val) const {
264 MI->addOperand(*MF, MachineOperand::CreateShuffleMask(Val));
265 return *this;
266 }
267
268 const MachineInstrBuilder &addSym(MCSymbol *Sym,
269 unsigned char TargetFlags = 0) const {
270 MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
271 return *this;
272 }
273
274 const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
275 MI->setFlags(Flags);
276 return *this;
277 }
278
279 const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
280 MI->setFlag(Flag);
281 return *this;
282 }
283
284 // Add a displacement from an existing MachineOperand with an added offset.
285 const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
286 unsigned char TargetFlags = 0) const {
287 // If caller specifies new TargetFlags then use it, otherwise the
288 // default behavior is to copy the target flags from the existing
289 // MachineOperand. This means if the caller wants to clear the
290 // target flags it needs to do so explicitly.
291 if (0 == TargetFlags)
292 TargetFlags = Disp.getTargetFlags();
293
294 switch (Disp.getType()) {
295 default:
296 llvm_unreachable("Unhandled operand type in addDisp()")::llvm::llvm_unreachable_internal("Unhandled operand type in addDisp()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 296)
;
297 case MachineOperand::MO_Immediate:
298 return addImm(Disp.getImm() + off);
299 case MachineOperand::MO_ConstantPoolIndex:
300 return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
301 TargetFlags);
302 case MachineOperand::MO_GlobalAddress:
303 return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
304 TargetFlags);
305 case MachineOperand::MO_BlockAddress:
306 return addBlockAddress(Disp.getBlockAddress(), Disp.getOffset() + off,
307 TargetFlags);
308 case MachineOperand::MO_JumpTableIndex:
309 assert(off == 0 && "cannot create offset into jump tables")((off == 0 && "cannot create offset into jump tables"
) ? static_cast<void> (0) : __assert_fail ("off == 0 && \"cannot create offset into jump tables\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 309, __PRETTY_FUNCTION__))
;
310 return addJumpTableIndex(Disp.getIndex(), TargetFlags);
311 }
312 }
313
314 /// Copy all the implicit operands from OtherMI onto this one.
315 const MachineInstrBuilder &
316 copyImplicitOps(const MachineInstr &OtherMI) const {
317 MI->copyImplicitOps(*MF, OtherMI);
318 return *this;
319 }
320
321 bool constrainAllUses(const TargetInstrInfo &TII,
322 const TargetRegisterInfo &TRI,
323 const RegisterBankInfo &RBI) const {
324 return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
325 }
326};
327
328/// Builder interface. Specify how to create the initial instruction itself.
329inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
330 const MCInstrDesc &MCID) {
331 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
332}
333
334/// This version of the builder sets up the first operand as a
335/// destination virtual register.
336inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
337 const MCInstrDesc &MCID, Register DestReg) {
338 return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
339 .addReg(DestReg, RegState::Define);
340}
341
342/// This version of the builder inserts the newly-built instruction before
343/// the given position in the given MachineBasicBlock, and sets up the first
344/// operand as a destination virtual register.
345inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
346 MachineBasicBlock::iterator I,
347 const DebugLoc &DL, const MCInstrDesc &MCID,
348 Register DestReg) {
349 MachineFunction &MF = *BB.getParent();
350 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
351 BB.insert(I, MI);
352 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
353}
354
355/// This version of the builder inserts the newly-built instruction before
356/// the given position in the given MachineBasicBlock, and sets up the first
357/// operand as a destination virtual register.
358///
359/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
360/// added to the same bundle.
361inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
362 MachineBasicBlock::instr_iterator I,
363 const DebugLoc &DL, const MCInstrDesc &MCID,
364 Register DestReg) {
365 MachineFunction &MF = *BB.getParent();
366 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
367 BB.insert(I, MI);
368 return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
369}
370
371inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
372 const DebugLoc &DL, const MCInstrDesc &MCID,
373 Register DestReg) {
374 // Calling the overload for instr_iterator is always correct. However, the
375 // definition is not available in headers, so inline the check.
376 if (I.isInsideBundle())
377 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID, DestReg);
378 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID, DestReg);
379}
380
381inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
382 const DebugLoc &DL, const MCInstrDesc &MCID,
383 Register DestReg) {
384 return BuildMI(BB, *I, DL, MCID, DestReg);
385}
386
387/// This version of the builder inserts the newly-built instruction before the
388/// given position in the given MachineBasicBlock, and does NOT take a
389/// destination register.
390inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
391 MachineBasicBlock::iterator I,
392 const DebugLoc &DL,
393 const MCInstrDesc &MCID) {
394 MachineFunction &MF = *BB.getParent();
395 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
396 BB.insert(I, MI);
397 return MachineInstrBuilder(MF, MI);
398}
399
400inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
401 MachineBasicBlock::instr_iterator I,
402 const DebugLoc &DL,
403 const MCInstrDesc &MCID) {
404 MachineFunction &MF = *BB.getParent();
405 MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
406 BB.insert(I, MI);
407 return MachineInstrBuilder(MF, MI);
408}
409
410inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
411 const DebugLoc &DL,
412 const MCInstrDesc &MCID) {
413 // Calling the overload for instr_iterator is always correct. However, the
414 // definition is not available in headers, so inline the check.
415 if (I.isInsideBundle())
416 return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID);
417 return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID);
418}
419
420inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
421 const DebugLoc &DL,
422 const MCInstrDesc &MCID) {
423 return BuildMI(BB, *I, DL, MCID);
424}
425
426/// This version of the builder inserts the newly-built instruction at the end
427/// of the given MachineBasicBlock, and does NOT take a destination register.
428inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
429 const MCInstrDesc &MCID) {
430 return BuildMI(*BB, BB->end(), DL, MCID);
431}
432
433/// This version of the builder inserts the newly-built instruction at the
434/// end of the given MachineBasicBlock, and sets up the first operand as a
435/// destination virtual register.
436inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
437 const MCInstrDesc &MCID, Register DestReg) {
438 return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
439}
440
441/// This version of the builder builds a DBG_VALUE intrinsic
442/// for either a value in a register or a register-indirect
443/// address. The convention is that a DBG_VALUE is indirect iff the
444/// second operand is an immediate.
445MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
446 const MCInstrDesc &MCID, bool IsIndirect,
447 Register Reg, const MDNode *Variable,
448 const MDNode *Expr);
449
450/// This version of the builder builds a DBG_VALUE intrinsic
451/// for a MachineOperand.
452MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
453 const MCInstrDesc &MCID, bool IsIndirect,
454 const MachineOperand &MO, const MDNode *Variable,
455 const MDNode *Expr);
456
457/// This version of the builder builds a DBG_VALUE or DBG_VALUE_LIST intrinsic
458/// for a MachineOperand.
459MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
460 const MCInstrDesc &MCID, bool IsIndirect,
461 ArrayRef<MachineOperand> MOs,
462 const MDNode *Variable, const MDNode *Expr);
463
464/// This version of the builder builds a DBG_VALUE intrinsic
465/// for either a value in a register or a register-indirect
466/// address and inserts it at position I.
467MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
468 MachineBasicBlock::iterator I, const DebugLoc &DL,
469 const MCInstrDesc &MCID, bool IsIndirect,
470 Register Reg, const MDNode *Variable,
471 const MDNode *Expr);
472
473/// This version of the builder builds a DBG_VALUE intrinsic
474/// for a machine operand and inserts it at position I.
475MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
476 MachineBasicBlock::iterator I, const DebugLoc &DL,
477 const MCInstrDesc &MCID, bool IsIndirect,
478 MachineOperand &MO, const MDNode *Variable,
479 const MDNode *Expr);
480
481/// This version of the builder builds a DBG_VALUE or DBG_VALUE_LIST intrinsic
482/// for a machine operand and inserts it at position I.
483MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
484 MachineBasicBlock::iterator I, const DebugLoc &DL,
485 const MCInstrDesc &MCID, bool IsIndirect,
486 ArrayRef<MachineOperand> MOs,
487 const MDNode *Variable, const MDNode *Expr);
488
489/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
490MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
491 MachineBasicBlock::iterator I,
492 const MachineInstr &Orig, int FrameIndex,
493 Register SpillReg);
494MachineInstr *
495buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I,
496 const MachineInstr &Orig, int FrameIndex,
497 SmallVectorImpl<const MachineOperand *> &SpilledOperands);
498
499/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
500/// modifying an instruction in place while iterating over a basic block.
501void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg);
502
503inline unsigned getDefRegState(bool B) {
504 return B ? RegState::Define : 0;
505}
506inline unsigned getImplRegState(bool B) {
507 return B ? RegState::Implicit : 0;
508}
509inline unsigned getKillRegState(bool B) {
510 return B ? RegState::Kill : 0;
511}
512inline unsigned getDeadRegState(bool B) {
513 return B ? RegState::Dead : 0;
514}
515inline unsigned getUndefRegState(bool B) {
516 return B ? RegState::Undef : 0;
517}
518inline unsigned getInternalReadRegState(bool B) {
519 return B ? RegState::InternalRead : 0;
520}
521inline unsigned getDebugRegState(bool B) {
522 return B ? RegState::Debug : 0;
523}
524inline unsigned getRenamableRegState(bool B) {
525 return B ? RegState::Renamable : 0;
526}
527
528/// Get all register state flags from machine operand \p RegOp.
529inline unsigned getRegState(const MachineOperand &RegOp) {
530 assert(RegOp.isReg() && "Not a register operand")((RegOp.isReg() && "Not a register operand") ? static_cast
<void> (0) : __assert_fail ("RegOp.isReg() && \"Not a register operand\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 530, __PRETTY_FUNCTION__))
;
531 return getDefRegState(RegOp.isDef()) | getImplRegState(RegOp.isImplicit()) |
532 getKillRegState(RegOp.isKill()) | getDeadRegState(RegOp.isDead()) |
533 getUndefRegState(RegOp.isUndef()) |
534 getInternalReadRegState(RegOp.isInternalRead()) |
535 getDebugRegState(RegOp.isDebug()) |
536 getRenamableRegState(Register::isPhysicalRegister(RegOp.getReg()) &&
537 RegOp.isRenamable());
538}
539
540/// Helper class for constructing bundles of MachineInstrs.
541///
542/// MIBundleBuilder can create a bundle from scratch by inserting new
543/// MachineInstrs one at a time, or it can create a bundle from a sequence of
544/// existing MachineInstrs in a basic block.
545class MIBundleBuilder {
546 MachineBasicBlock &MBB;
547 MachineBasicBlock::instr_iterator Begin;
548 MachineBasicBlock::instr_iterator End;
549
550public:
551 /// Create an MIBundleBuilder that inserts instructions into a new bundle in
552 /// BB above the bundle or instruction at Pos.
553 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
554 : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
555
556 /// Create a bundle from the sequence of instructions between B and E.
557 MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
558 MachineBasicBlock::iterator E)
559 : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
560 assert(B != E && "No instructions to bundle")((B != E && "No instructions to bundle") ? static_cast
<void> (0) : __assert_fail ("B != E && \"No instructions to bundle\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/MachineInstrBuilder.h"
, 560, __PRETTY_FUNCTION__))
;
561 ++B;
562 while (B != E) {
563 MachineInstr &MI = *B;
564 ++B;
565 MI.bundleWithPred();
566 }
567 }
568
569 /// Create an MIBundleBuilder representing an existing instruction or bundle
570 /// that has MI as its head.
571 explicit MIBundleBuilder(MachineInstr *MI)
572 : MBB(*MI->getParent()), Begin(MI),
573 End(getBundleEnd(MI->getIterator())) {}
574
575 /// Return a reference to the basic block containing this bundle.
576 MachineBasicBlock &getMBB() const { return MBB; }
577
578 /// Return true if no instructions have been inserted in this bundle yet.
579 /// Empty bundles aren't representable in a MachineBasicBlock.
580 bool empty() const { return Begin == End; }
581
582 /// Return an iterator to the first bundled instruction.
583 MachineBasicBlock::instr_iterator begin() const { return Begin; }
584
585 /// Return an iterator beyond the last bundled instruction.
586 MachineBasicBlock::instr_iterator end() const { return End; }
587
588 /// Insert MI into this bundle before I which must point to an instruction in
589 /// the bundle, or end().
590 MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
591 MachineInstr *MI) {
592 MBB.insert(I, MI);
593 if (I == Begin) {
594 if (!empty())
595 MI->bundleWithSucc();
596 Begin = MI->getIterator();
597 return *this;
598 }
599 if (I == End) {
600 MI->bundleWithPred();
601 return *this;
602 }
603 // MI was inserted in the middle of the bundle, so its neighbors' flags are
604 // already fine. Update MI's bundle flags manually.
605 MI->setFlag(MachineInstr::BundledPred);
606 MI->setFlag(MachineInstr::BundledSucc);
607 return *this;
608 }
609
610 /// Insert MI into MBB by prepending it to the instructions in the bundle.
611 /// MI will become the first instruction in the bundle.
612 MIBundleBuilder &prepend(MachineInstr *MI) {
613 return insert(begin(), MI);
614 }
615
616 /// Insert MI into MBB by appending it to the instructions in the bundle.
617 /// MI will become the last instruction in the bundle.
618 MIBundleBuilder &append(MachineInstr *MI) {
619 return insert(end(), MI);
620 }
621};
622
623} // end namespace llvm
624
625#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H