LLVM 22.0.0git
SparcISelLowering.cpp
Go to the documentation of this file.
1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Sparc uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SparcISelLowering.h"
17#include "SparcRegisterInfo.h"
19#include "SparcTargetMachine.h"
35#include "llvm/IR/Function.h"
36#include "llvm/IR/IRBuilder.h"
37#include "llvm/IR/Module.h"
40using namespace llvm;
41
42
43//===----------------------------------------------------------------------===//
44// Calling Convention Implementation
45//===----------------------------------------------------------------------===//
46
47static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
48 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
49 ISD::ArgFlagsTy &ArgFlags, CCState &State)
50{
51 assert (ArgFlags.isSRet());
52
53 // Assign SRet argument.
54 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
55 0,
56 LocVT, LocInfo));
57 return true;
58}
59
60static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
61 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
62 ISD::ArgFlagsTy &ArgFlags, CCState &State)
63{
64 static const MCPhysReg RegList[] = {
65 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
66 };
67 // Try to get first reg.
68 if (Register Reg = State.AllocateReg(RegList)) {
69 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
70 } else {
71 // Assign whole thing in stack.
72 State.addLoc(CCValAssign::getCustomMem(
73 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
74 return true;
75 }
76
77 // Try to get second reg.
78 if (Register Reg = State.AllocateReg(RegList))
79 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
80 else
81 State.addLoc(CCValAssign::getCustomMem(
82 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
83 return true;
84}
85
86static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
87 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
88 ISD::ArgFlagsTy &ArgFlags, CCState &State)
89{
90 static const MCPhysReg RegList[] = {
91 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
92 };
93
94 // Try to get first reg.
95 if (Register Reg = State.AllocateReg(RegList))
96 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
97 else
98 return false;
99
100 // Try to get second reg.
101 if (Register Reg = State.AllocateReg(RegList))
102 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
103 else
104 return false;
105
106 return true;
107}
108
109// Allocate a full-sized argument for the 64-bit ABI.
110static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
111 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
112 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
113 assert((LocVT == MVT::f32 || LocVT == MVT::f128
114 || LocVT.getSizeInBits() == 64) &&
115 "Can't handle non-64 bits locations");
116
117 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
118 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
119 Align alignment =
120 (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8);
121 unsigned Offset = State.AllocateStack(size, alignment);
122 unsigned Reg = 0;
123
124 if (LocVT == MVT::i64 && Offset < 6*8)
125 // Promote integers to %i0-%i5.
126 Reg = SP::I0 + Offset/8;
127 else if (LocVT == MVT::f64 && Offset < 16*8)
128 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
129 Reg = SP::D0 + Offset/8;
130 else if (LocVT == MVT::f32 && Offset < 16*8)
131 // Promote floats to %f1, %f3, ...
132 Reg = SP::F1 + Offset/4;
133 else if (LocVT == MVT::f128 && Offset < 16*8)
134 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
135 Reg = SP::Q0 + Offset/16;
136
137 // Promote to register when possible, otherwise use the stack slot.
138 if (Reg) {
139 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
140 return true;
141 }
142
143 // Bail out if this is a return CC and we run out of registers to place
144 // values into.
145 if (IsReturn)
146 return false;
147
148 // This argument goes on the stack in an 8-byte slot.
149 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
150 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
151 if (LocVT == MVT::f32)
152 Offset += 4;
153
154 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
155 return true;
156}
157
158// Allocate a half-sized argument for the 64-bit ABI.
159//
160// This is used when passing { float, int } structs by value in registers.
161static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
162 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
163 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
164 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
165 unsigned Offset = State.AllocateStack(4, Align(4));
166
167 if (LocVT == MVT::f32 && Offset < 16*8) {
168 // Promote floats to %f0-%f31.
169 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
170 LocVT, LocInfo));
171 return true;
172 }
173
174 if (LocVT == MVT::i32 && Offset < 6*8) {
175 // Promote integers to %i0-%i5, using half the register.
176 unsigned Reg = SP::I0 + Offset/8;
177 LocVT = MVT::i64;
178 LocInfo = CCValAssign::AExt;
179
180 // Set the Custom bit if this i32 goes in the high bits of a register.
181 if (Offset % 8 == 0)
182 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
183 LocVT, LocInfo));
184 else
185 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
186 return true;
187 }
188
189 // Bail out if this is a return CC and we run out of registers to place
190 // values into.
191 if (IsReturn)
192 return false;
193
194 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
195 return true;
196}
197
198static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
199 CCValAssign::LocInfo &LocInfo,
200 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
201 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
202 State);
203}
204
205static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
206 CCValAssign::LocInfo &LocInfo,
207 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
208 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
209 State);
210}
211
212static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
213 CCValAssign::LocInfo &LocInfo,
214 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
215 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
216 State);
217}
218
219static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
220 CCValAssign::LocInfo &LocInfo,
221 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
222 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
223 State);
224}
225
226#include "SparcGenCallingConv.inc"
227
228// The calling conventions in SparcCallingConv.td are described in terms of the
229// callee's register window. This function translates registers to the
230// corresponding caller window %o register.
231static unsigned toCallerWindow(unsigned Reg) {
232 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
233 "Unexpected enum");
234 if (Reg >= SP::I0 && Reg <= SP::I7)
235 return Reg - SP::I0 + SP::O0;
236 return Reg;
237}
238
240 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
241 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
242 const Type *RetTy) const {
244 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
245 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
246 : RetCC_Sparc32);
247}
248
251 bool IsVarArg,
253 const SmallVectorImpl<SDValue> &OutVals,
254 const SDLoc &DL, SelectionDAG &DAG) const {
255 if (Subtarget->is64Bit())
256 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
257 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
258}
259
262 bool IsVarArg,
264 const SmallVectorImpl<SDValue> &OutVals,
265 const SDLoc &DL, SelectionDAG &DAG) const {
267
268 // CCValAssign - represent the assignment of the return value to locations.
270
271 // CCState - Info about the registers and stack slot.
272 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
273 *DAG.getContext());
274
275 // Analyze return values.
276 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
277
278 SDValue Glue;
279 SmallVector<SDValue, 4> RetOps(1, Chain);
280 // Make room for the return address offset.
281 RetOps.push_back(SDValue());
282
283 // Copy the result values into the output registers.
284 for (unsigned i = 0, realRVLocIdx = 0;
285 i != RVLocs.size();
286 ++i, ++realRVLocIdx) {
287 CCValAssign &VA = RVLocs[i];
288 assert(VA.isRegLoc() && "Can only return in registers!");
289
290 SDValue Arg = OutVals[realRVLocIdx];
291
292 if (VA.needsCustom()) {
293 assert(VA.getLocVT() == MVT::v2i32);
294 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
295 // happen by default if this wasn't a legal type)
296
297 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
298 Arg,
300 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
301 Arg,
303
304 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
305 Glue = Chain.getValue(1);
306 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
307 VA = RVLocs[++i]; // skip ahead to next loc
308 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
309 Glue);
310 } else
311 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
312
313 // Guarantee that all emitted copies are stuck together with flags.
314 Glue = Chain.getValue(1);
315 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
316 }
317
318 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
319 // If the function returns a struct, copy the SRetReturnReg to I0
320 if (MF.getFunction().hasStructRetAttr()) {
322 Register Reg = SFI->getSRetReturnReg();
323 if (!Reg)
324 llvm_unreachable("sret virtual register not created in the entry block");
325 auto PtrVT = getPointerTy(DAG.getDataLayout());
326 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
327 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
328 Glue = Chain.getValue(1);
329 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
330 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
331 }
332
333 RetOps[0] = Chain; // Update chain.
334 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
335
336 // Add the glue if we have it.
337 if (Glue.getNode())
338 RetOps.push_back(Glue);
339
340 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
341}
342
343// Lower return values for the 64-bit ABI.
344// Return values are passed the exactly the same way as function arguments.
347 bool IsVarArg,
349 const SmallVectorImpl<SDValue> &OutVals,
350 const SDLoc &DL, SelectionDAG &DAG) const {
351 // CCValAssign - represent the assignment of the return value to locations.
353
354 // CCState - Info about the registers and stack slot.
355 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
356 *DAG.getContext());
357
358 // Analyze return values.
359 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
360
361 SDValue Glue;
362 SmallVector<SDValue, 4> RetOps(1, Chain);
363
364 // The second operand on the return instruction is the return address offset.
365 // The return address is always %i7+8 with the 64-bit ABI.
366 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
367
368 // Copy the result values into the output registers.
369 for (unsigned i = 0; i != RVLocs.size(); ++i) {
370 CCValAssign &VA = RVLocs[i];
371 assert(VA.isRegLoc() && "Can only return in registers!");
372 SDValue OutVal = OutVals[i];
373
374 // Integer return values must be sign or zero extended by the callee.
375 switch (VA.getLocInfo()) {
376 case CCValAssign::Full: break;
378 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
379 break;
381 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
382 break;
384 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
385 break;
386 default:
387 llvm_unreachable("Unknown loc info!");
388 }
389
390 // The custom bit on an i32 return value indicates that it should be passed
391 // in the high bits of the register.
392 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
393 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
394 DAG.getConstant(32, DL, MVT::i32));
395
396 // The next value may go in the low bits of the same register.
397 // Handle both at once.
398 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
399 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
400 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
401 // Skip the next value, it's already done.
402 ++i;
403 }
404 }
405
406 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
407
408 // Guarantee that all emitted copies are stuck together with flags.
409 Glue = Chain.getValue(1);
410 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
411 }
412
413 RetOps[0] = Chain; // Update chain.
414
415 // Add the flag if we have it.
416 if (Glue.getNode())
417 RetOps.push_back(Glue);
418
419 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
420}
421
423 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
424 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
425 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
426 if (Subtarget->is64Bit())
427 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
428 DL, DAG, InVals);
429 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
430 DL, DAG, InVals);
431}
432
433/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
434/// passed in either one or two GPRs, including FP values. TODO: we should
435/// pass FP values in FP registers for fastcc functions.
437 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
438 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
439 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
441 MachineRegisterInfo &RegInfo = MF.getRegInfo();
443 EVT PtrVT = getPointerTy(DAG.getDataLayout());
444
445 // Assign locations to all of the incoming arguments.
447 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
448 *DAG.getContext());
449 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
450
451 const unsigned StackOffset = 92;
452 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
453
454 unsigned InIdx = 0;
455 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
456 CCValAssign &VA = ArgLocs[i];
457 EVT LocVT = VA.getLocVT();
458
459 if (Ins[InIdx].Flags.isSRet()) {
460 if (InIdx != 0)
461 report_fatal_error("sparc only supports sret on the first parameter");
462 // Get SRet from [%fp+64].
463 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
464 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
465 SDValue Arg =
466 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
467 InVals.push_back(Arg);
468 continue;
469 }
470
471 SDValue Arg;
472 if (VA.isRegLoc()) {
473 if (VA.needsCustom()) {
474 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
475
476 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
477 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
478 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
479
480 assert(i+1 < e);
481 CCValAssign &NextVA = ArgLocs[++i];
482
483 SDValue LoVal;
484 if (NextVA.isMemLoc()) {
485 int FrameIdx = MF.getFrameInfo().
486 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
487 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
488 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
489 } else {
490 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
491 &SP::IntRegsRegClass);
492 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
493 }
494
495 if (IsLittleEndian)
496 std::swap(LoVal, HiVal);
497
498 SDValue WholeValue =
499 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
500 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
501 InVals.push_back(WholeValue);
502 continue;
503 }
504 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
505 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
506 Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
507 if (VA.getLocInfo() != CCValAssign::Indirect) {
508 if (VA.getLocVT() == MVT::f32)
509 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
510 else if (VA.getLocVT() != MVT::i32) {
511 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
512 DAG.getValueType(VA.getLocVT()));
513 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
514 }
515 InVals.push_back(Arg);
516 continue;
517 }
518 } else {
519 assert(VA.isMemLoc());
520
521 unsigned Offset = VA.getLocMemOffset() + StackOffset;
522
523 if (VA.needsCustom()) {
524 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
525 // If it is double-word aligned, just load.
526 if (Offset % 8 == 0) {
527 int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true);
528 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
529 SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
531 InVals.push_back(Load);
532 continue;
533 }
534
535 int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
536 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
537 SDValue HiVal =
538 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
539 int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset + 4, true);
540 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
541
542 SDValue LoVal =
543 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
544
545 if (IsLittleEndian)
546 std::swap(LoVal, HiVal);
547
548 SDValue WholeValue =
549 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
550 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
551 InVals.push_back(WholeValue);
552 continue;
553 }
554
555 int FI = MF.getFrameInfo().CreateFixedObject(LocVT.getSizeInBits() / 8,
556 Offset, true);
557 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
558 SDValue Load = DAG.getLoad(LocVT, dl, Chain, FIPtr,
560 if (VA.getLocInfo() != CCValAssign::Indirect) {
561 InVals.push_back(Load);
562 continue;
563 }
564 Arg = Load;
565 }
566
568
569 SDValue ArgValue =
570 DAG.getLoad(VA.getValVT(), dl, Chain, Arg, MachinePointerInfo());
571 InVals.push_back(ArgValue);
572
573 unsigned ArgIndex = Ins[InIdx].OrigArgIndex;
574 assert(Ins[InIdx].PartOffset == 0);
575 while (i + 1 != e && Ins[InIdx + 1].OrigArgIndex == ArgIndex) {
576 CCValAssign &PartVA = ArgLocs[i + 1];
577 unsigned PartOffset = Ins[InIdx + 1].PartOffset;
579 ArgValue, TypeSize::getFixed(PartOffset), dl);
580 InVals.push_back(DAG.getLoad(PartVA.getValVT(), dl, Chain, Address,
582 ++i;
583 ++InIdx;
584 }
585 }
586
587 if (MF.getFunction().hasStructRetAttr()) {
588 // Copy the SRet Argument to SRetReturnReg.
590 Register Reg = SFI->getSRetReturnReg();
591 if (!Reg) {
592 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
593 SFI->setSRetReturnReg(Reg);
594 }
595 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
596 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
597 }
598
599 // Store remaining ArgRegs to the stack if this is a varargs function.
600 if (isVarArg) {
601 static const MCPhysReg ArgRegs[] = {
602 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
603 };
604 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
605 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
606 unsigned ArgOffset = CCInfo.getStackSize();
607 if (NumAllocated == 6)
608 ArgOffset += StackOffset;
609 else {
610 assert(!ArgOffset);
611 ArgOffset = 68+4*NumAllocated;
612 }
613
614 // Remember the vararg offset for the va_start implementation.
615 FuncInfo->setVarArgsFrameOffset(ArgOffset);
616
617 std::vector<SDValue> OutChains;
618
619 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
620 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
621 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
622 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
623
624 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
625 true);
626 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
627
628 OutChains.push_back(
629 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
630 ArgOffset += 4;
631 }
632
633 if (!OutChains.empty()) {
634 OutChains.push_back(Chain);
635 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
636 }
637 }
638
639 return Chain;
640}
641
642// Lower formal arguments for the 64 bit ABI.
644 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
645 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
646 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
648
649 // Analyze arguments according to CC_Sparc64.
651 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
652 *DAG.getContext());
653 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
654
655 // The argument array begins at %fp+BIAS+128, after the register save area.
656 const unsigned ArgArea = 128;
657
658 for (const CCValAssign &VA : ArgLocs) {
659 if (VA.isRegLoc()) {
660 // This argument is passed in a register.
661 // All integer register arguments are promoted by the caller to i64.
662
663 // Create a virtual register for the promoted live-in value.
664 Register VReg = MF.addLiveIn(VA.getLocReg(),
665 getRegClassFor(VA.getLocVT()));
666 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
667
668 // Get the high bits for i32 struct elements.
669 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
670 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
671 DAG.getConstant(32, DL, MVT::i32));
672
673 // The caller promoted the argument, so insert an Assert?ext SDNode so we
674 // won't promote the value again in this function.
675 switch (VA.getLocInfo()) {
677 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
678 DAG.getValueType(VA.getValVT()));
679 break;
681 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
682 DAG.getValueType(VA.getValVT()));
683 break;
684 default:
685 break;
686 }
687
688 // Truncate the register down to the argument type.
689 if (VA.isExtInLoc())
690 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
691
692 InVals.push_back(Arg);
693 continue;
694 }
695
696 // The registers are exhausted. This argument was passed on the stack.
697 assert(VA.isMemLoc());
698 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
699 // beginning of the arguments area at %fp+BIAS+128.
700 unsigned Offset = VA.getLocMemOffset() + ArgArea;
701 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
702 // Adjust offset for extended arguments, SPARC is big-endian.
703 // The caller will have written the full slot with extended bytes, but we
704 // prefer our own extending loads.
705 if (VA.isExtInLoc())
706 Offset += 8 - ValSize;
707 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
708 InVals.push_back(
709 DAG.getLoad(VA.getValVT(), DL, Chain,
712 }
713
714 if (!IsVarArg)
715 return Chain;
716
717 // This function takes variable arguments, some of which may have been passed
718 // in registers %i0-%i5. Variable floating point arguments are never passed
719 // in floating point registers. They go on %i0-%i5 or on the stack like
720 // integer arguments.
721 //
722 // The va_start intrinsic needs to know the offset to the first variable
723 // argument.
724 unsigned ArgOffset = CCInfo.getStackSize();
726 // Skip the 128 bytes of register save area.
727 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
728 Subtarget->getStackPointerBias());
729
730 // Save the variable arguments that were passed in registers.
731 // The caller is required to reserve stack space for 6 arguments regardless
732 // of how many arguments were actually passed.
733 SmallVector<SDValue, 8> OutChains;
734 for (; ArgOffset < 6*8; ArgOffset += 8) {
735 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
736 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
737 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
738 auto PtrVT = getPointerTy(MF.getDataLayout());
739 OutChains.push_back(
740 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
742 }
743
744 if (!OutChains.empty())
745 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
746
747 return Chain;
748}
749
750// Check whether any of the argument registers are reserved
752 const MachineFunction &MF) {
753 // The register window design means that outgoing parameters at O*
754 // will appear in the callee as I*.
755 // Be conservative and check both sides of the register names.
756 bool Outgoing =
757 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
758 return TRI->isReservedReg(MF, r);
759 });
760 bool Incoming =
761 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
762 return TRI->isReservedReg(MF, r);
763 });
764 return Outgoing || Incoming;
765}
766
768 const Function &F = MF.getFunction();
769 F.getContext().diagnose(DiagnosticInfoUnsupported{
770 F, ("SPARC doesn't support"
771 " function calls if any of the argument registers is reserved.")});
772}
773
776 SmallVectorImpl<SDValue> &InVals) const {
777 if (Subtarget->is64Bit())
778 return LowerCall_64(CLI, InVals);
779 return LowerCall_32(CLI, InVals);
780}
781
782static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
783 const CallBase *Call) {
784 if (Call)
785 return Call->hasFnAttr(Attribute::ReturnsTwice);
786
787 const Function *CalleeFn = nullptr;
789 CalleeFn = dyn_cast<Function>(G->getGlobal());
790 } else if (ExternalSymbolSDNode *E =
792 const Function &Fn = DAG.getMachineFunction().getFunction();
793 const Module *M = Fn.getParent();
794 const char *CalleeName = E->getSymbol();
795 CalleeFn = M->getFunction(CalleeName);
796 }
797
798 if (!CalleeFn)
799 return false;
800 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
801}
802
803/// IsEligibleForTailCallOptimization - Check whether the call is eligible
804/// for tail call optimization.
806 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
807
808 auto &Outs = CLI.Outs;
809 auto &Caller = MF.getFunction();
810
811 // Do not tail call opt functions with "disable-tail-calls" attribute.
812 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
813 return false;
814
815 // Do not tail call opt if the stack is used to pass parameters.
816 // 64-bit targets have a slightly higher limit since the ABI requires
817 // to allocate some space even when all the parameters fit inside registers.
818 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
819 if (CCInfo.getStackSize() > StackSizeLimit)
820 return false;
821
822 // Do not tail call opt if either the callee or caller returns
823 // a struct and the other does not.
824 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
825 return false;
826
827 // Byval parameters hand the function a pointer directly into the stack area
828 // we want to reuse during a tail call.
829 for (auto &Arg : Outs)
830 if (Arg.Flags.isByVal())
831 return false;
832
833 return true;
834}
835
836// Lower a call for the 32-bit ABI.
839 SmallVectorImpl<SDValue> &InVals) const {
840 SelectionDAG &DAG = CLI.DAG;
841 SDLoc &dl = CLI.DL;
843 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
845 SDValue Chain = CLI.Chain;
846 SDValue Callee = CLI.Callee;
847 bool &isTailCall = CLI.IsTailCall;
848 CallingConv::ID CallConv = CLI.CallConv;
849 bool isVarArg = CLI.IsVarArg;
851 LLVMContext &Ctx = *DAG.getContext();
852 EVT PtrVT = getPointerTy(MF.getDataLayout());
853
854 // Analyze operands of the call, assigning locations to each operand.
856 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
857 *DAG.getContext());
858 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
859
860 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
861 CCInfo, CLI, DAG.getMachineFunction());
862
863 // Get the size of the outgoing arguments stack space requirement.
864 unsigned ArgsSize = CCInfo.getStackSize();
865
866 // Keep stack frames 8-byte aligned.
867 ArgsSize = (ArgsSize+7) & ~7;
868
870
871 // Create local copies for byval args.
872 SmallVector<SDValue, 8> ByValArgs;
873 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
874 ISD::ArgFlagsTy Flags = Outs[i].Flags;
875 if (!Flags.isByVal())
876 continue;
877
878 SDValue Arg = OutVals[i];
879 unsigned Size = Flags.getByValSize();
880 Align Alignment = Flags.getNonZeroByValAlign();
881
882 if (Size > 0U) {
883 int FI = MFI.CreateStackObject(Size, Alignment, false);
884 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
885 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
886
887 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
888 false, // isVolatile,
889 (Size <= 32), // AlwaysInline if size <= 32,
890 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
892 ByValArgs.push_back(FIPtr);
893 }
894 else {
895 SDValue nullVal;
896 ByValArgs.push_back(nullVal);
897 }
898 }
899
900 assert(!isTailCall || ArgsSize == 0);
901
902 if (!isTailCall)
903 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
904
906 SmallVector<SDValue, 8> MemOpChains;
907
908 const unsigned StackOffset = 92;
909 bool hasStructRetAttr = false;
910 unsigned SRetArgSize = 0;
911 // Walk the register/memloc assignments, inserting copies/loads.
912 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
913 i != e;
914 ++i, ++realArgIdx) {
915 CCValAssign &VA = ArgLocs[i];
916 SDValue Arg = OutVals[realArgIdx];
917
918 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
919
920 // Use local copy if it is a byval arg.
921 if (Flags.isByVal()) {
922 Arg = ByValArgs[byvalArgIdx++];
923 if (!Arg) {
924 continue;
925 }
926 }
927
928 // Promote the value if needed.
929 switch (VA.getLocInfo()) {
930 default: llvm_unreachable("Unknown loc info!");
933 break;
935 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
936 break;
938 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
939 break;
941 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
942 break;
944 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
945 break;
946 }
947
948 if (Flags.isSRet()) {
949 assert(VA.needsCustom());
950
951 if (isTailCall)
952 continue;
953
954 // store SRet argument in %sp+64
955 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
956 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
957 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
958 MemOpChains.push_back(
959 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
960 hasStructRetAttr = true;
961 // sret only allowed on first argument
962 assert(Outs[realArgIdx].OrigArgIndex == 0);
963 SRetArgSize =
964 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
965 continue;
966 }
967
968 if (VA.needsCustom()) {
969 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
970
971 if (VA.isMemLoc()) {
972 unsigned Offset = VA.getLocMemOffset() + StackOffset;
973 // if it is double-word aligned, just store.
974 if (Offset % 8 == 0) {
975 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
976 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
977 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
978 MemOpChains.push_back(
979 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
980 continue;
981 }
982 }
983
984 if (VA.getLocVT() == MVT::f64) {
985 // Move from the float value from float registers into the
986 // integer registers.
988 Arg = bitcastConstantFPToInt(C, dl, DAG);
989 else
990 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
991 }
992
993 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
994 Arg,
995 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
996 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
997 Arg,
998 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
999
1000 if (VA.isRegLoc()) {
1001 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
1002 assert(i+1 != e);
1003 CCValAssign &NextVA = ArgLocs[++i];
1004 if (NextVA.isRegLoc()) {
1005 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
1006 } else {
1007 // Store the second part in stack.
1008 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
1009 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1010 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1011 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1012 MemOpChains.push_back(
1013 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1014 }
1015 } else {
1016 unsigned Offset = VA.getLocMemOffset() + StackOffset;
1017 // Store the first part.
1018 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1019 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1020 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1021 MemOpChains.push_back(
1022 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1023 // Store the second part.
1024 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1025 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1026 MemOpChains.push_back(
1027 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1028 }
1029 continue;
1030 }
1031
1032 if (VA.getLocInfo() == CCValAssign::Indirect) {
1033 // Store the argument in a stack slot and pass its address.
1034 unsigned ArgIndex = Outs[realArgIdx].OrigArgIndex;
1035 assert(Outs[realArgIdx].PartOffset == 0);
1036
1037 EVT SlotVT;
1038 if (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) {
1039 Type *OrigArgType = CLI.Args[ArgIndex].Ty;
1040 EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType);
1041 MVT PartVT =
1042 getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
1043 unsigned N =
1044 getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
1045 SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N);
1046 } else {
1047 SlotVT = Outs[realArgIdx].VT;
1048 }
1049
1050 SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT);
1051 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1052 MemOpChains.push_back(
1053 DAG.getStore(Chain, dl, Arg, SpillSlot,
1055 // If the original argument was split (e.g. f128), we need
1056 // to store all parts of it here (and pass just one address).
1057 while (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) {
1058 SDValue PartValue = OutVals[realArgIdx + 1];
1059 unsigned PartOffset = Outs[realArgIdx + 1].PartOffset;
1061 DAG.getFrameIndex(FI, PtrVT), TypeSize::getFixed(PartOffset), dl);
1062 MemOpChains.push_back(
1063 DAG.getStore(Chain, dl, PartValue, Address,
1065 assert((PartOffset + PartValue.getValueType().getStoreSize() <=
1066 SlotVT.getStoreSize()) &&
1067 "Not enough space for argument part!");
1068 ++i;
1069 ++realArgIdx;
1070 }
1071
1072 Arg = SpillSlot;
1073 }
1074
1075 // Arguments that can be passed on register must be kept at
1076 // RegsToPass vector
1077 if (VA.isRegLoc()) {
1078 if (VA.getLocVT() != MVT::f32) {
1079 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1080 continue;
1081 }
1082 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1083 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1084 continue;
1085 }
1086
1087 assert(VA.isMemLoc());
1088
1089 // Create a store off the stack pointer for this argument.
1090 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1092 dl);
1093 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1094 MemOpChains.push_back(
1095 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1096 }
1097
1098
1099 // Emit all stores, make sure the occur before any copies into physregs.
1100 if (!MemOpChains.empty())
1101 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1102
1103 // Build a sequence of copy-to-reg nodes chained together with token
1104 // chain and flag operands which copy the outgoing args into registers.
1105 // The InGlue in necessary since all emitted instructions must be
1106 // stuck together.
1107 SDValue InGlue;
1108 for (const auto &[OrigReg, N] : RegsToPass) {
1109 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1110 Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
1111 InGlue = Chain.getValue(1);
1112 }
1113
1114 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1115
1116 // If the callee is a GlobalAddress node (quite common, every direct call is)
1117 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1118 // Likewise ExternalSymbol -> TargetExternalSymbol.
1120 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
1122 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1123
1124 // Returns a chain & a flag for retval copy to use
1125 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1127 Ops.push_back(Chain);
1128 Ops.push_back(Callee);
1129 if (hasStructRetAttr)
1130 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1131 for (const auto &[OrigReg, N] : RegsToPass) {
1132 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1133 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1134 }
1135
1136 // Add a register mask operand representing the call-preserved registers.
1137 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1138 const uint32_t *Mask =
1139 ((hasReturnsTwice)
1140 ? TRI->getRTCallPreservedMask(CallConv)
1141 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1142
1143 if (isAnyArgRegReserved(TRI, MF))
1145
1146 assert(Mask && "Missing call preserved mask for calling convention");
1147 Ops.push_back(DAG.getRegisterMask(Mask));
1148
1149 if (InGlue.getNode())
1150 Ops.push_back(InGlue);
1151
1152 if (isTailCall) {
1154 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1155 }
1156
1157 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1158 InGlue = Chain.getValue(1);
1159
1160 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1161 InGlue = Chain.getValue(1);
1162
1163 // Assign locations to each value returned by this call.
1165 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1166 *DAG.getContext());
1167
1168 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1169
1170 // Copy all of the result registers out of their specified physreg.
1171 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1172 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1173 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1174 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1176 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1177 Chain = Lo.getValue(1);
1178 InGlue = Lo.getValue(2);
1179 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1180 DAG.getConstant(0, dl, MVT::i32));
1182 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1183 Chain = Hi.getValue(1);
1184 InGlue = Hi.getValue(2);
1185 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1186 DAG.getConstant(1, dl, MVT::i32));
1187 InVals.push_back(Vec);
1188 } else {
1189 Chain =
1190 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1191 RVLocs[i].getValVT(), InGlue)
1192 .getValue(1);
1193 InGlue = Chain.getValue(2);
1194 InVals.push_back(Chain.getValue(0));
1195 }
1196 }
1197
1198 return Chain;
1199}
1200
1201// FIXME? Maybe this could be a TableGen attribute on some registers and
1202// this table could be generated automatically from RegInfo.
1204 const MachineFunction &MF) const {
1206 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1207 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1208 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1209 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1210 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1211 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1212 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1213 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1214 .Default(0);
1215
1216 // If we're directly referencing register names
1217 // (e.g in GCC C extension `register int r asm("g1");`),
1218 // make sure that said register is in the reserve list.
1219 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1220 if (!TRI->isReservedReg(MF, Reg))
1221 Reg = Register();
1222
1223 return Reg;
1224}
1225
1226// Fixup floating point arguments in the ... part of a varargs call.
1227//
1228// The SPARC v9 ABI requires that floating point arguments are treated the same
1229// as integers when calling a varargs function. This does not apply to the
1230// fixed arguments that are part of the function's prototype.
1231//
1232// This function post-processes a CCValAssign array created by
1233// AnalyzeCallOperands().
1236 for (CCValAssign &VA : ArgLocs) {
1237 MVT ValTy = VA.getLocVT();
1238 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1239 // varargs functions.
1240 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1241 continue;
1242 // The fixed arguments to a varargs function still go in FP registers.
1243 if (!Outs[VA.getValNo()].Flags.isVarArg())
1244 continue;
1245
1246 // This floating point argument should be reassigned.
1247 // Determine the offset into the argument array.
1248 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1249 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1250 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1251 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1252
1253 if (Offset < 6*8) {
1254 // This argument should go in %i0-%i5.
1255 unsigned IReg = SP::I0 + Offset/8;
1256 if (ValTy == MVT::f64)
1257 // Full register, just bitconvert into i64.
1258 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1260 else {
1261 assert(ValTy == MVT::f128 && "Unexpected type!");
1262 // Full register, just bitconvert into i128 -- We will lower this into
1263 // two i64s in LowerCall_64.
1264 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1265 MVT::i128, CCValAssign::BCvt);
1266 }
1267 } else {
1268 // This needs to go to memory, we're out of integer registers.
1269 VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
1270 VA.getLocVT(), VA.getLocInfo());
1271 }
1272 }
1273}
1274
1275// Lower a call for the 64-bit ABI.
1276SDValue
1278 SmallVectorImpl<SDValue> &InVals) const {
1279 SelectionDAG &DAG = CLI.DAG;
1280 SDLoc DL = CLI.DL;
1281 SDValue Chain = CLI.Chain;
1282 auto PtrVT = getPointerTy(DAG.getDataLayout());
1284
1285 // Analyze operands of the call, assigning locations to each operand.
1287 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1288 *DAG.getContext());
1289 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1290
1292 CCInfo, CLI, DAG.getMachineFunction());
1293
1294 // Get the size of the outgoing arguments stack space requirement.
1295 // The stack offset computed by CC_Sparc64 includes all arguments.
1296 // Called functions expect 6 argument words to exist in the stack frame, used
1297 // or not.
1298 unsigned StackReserved = 6 * 8u;
1299 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1300
1301 // Keep stack frames 16-byte aligned.
1302 ArgsSize = alignTo(ArgsSize, 16);
1303
1304 // Varargs calls require special treatment.
1305 if (CLI.IsVarArg)
1306 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1307
1308 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1309
1310 // Adjust the stack pointer to make room for the arguments.
1311 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1312 // with more than 6 arguments.
1313 if (!CLI.IsTailCall)
1314 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1315
1316 // Collect the set of registers to pass to the function and their values.
1317 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1318 // instruction.
1320
1321 // Collect chains from all the memory opeations that copy arguments to the
1322 // stack. They must follow the stack pointer adjustment above and precede the
1323 // call instruction itself.
1324 SmallVector<SDValue, 8> MemOpChains;
1325
1326 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1327 const CCValAssign &VA = ArgLocs[i];
1328 SDValue Arg = CLI.OutVals[i];
1329
1330 // Promote the value if needed.
1331 switch (VA.getLocInfo()) {
1332 default:
1333 llvm_unreachable("Unknown location info!");
1334 case CCValAssign::Full:
1335 break;
1336 case CCValAssign::SExt:
1337 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1338 break;
1339 case CCValAssign::ZExt:
1340 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1341 break;
1342 case CCValAssign::AExt:
1343 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1344 break;
1345 case CCValAssign::BCvt:
1346 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1347 // SPARC does not support i128 natively. Lower it into two i64, see below.
1348 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1349 || VA.getLocVT() != MVT::i128)
1350 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1351 break;
1352 }
1353
1354 if (VA.isRegLoc()) {
1355 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1356 && VA.getLocVT() == MVT::i128) {
1357 // Store and reload into the integer register reg and reg+1.
1358 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1359 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1360 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1361 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1362 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1363 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1364 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1365
1366 // Store to %sp+BIAS+128+Offset
1367 SDValue Store =
1368 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1369 // Load into Reg and Reg+1
1370 SDValue Hi64 =
1371 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1372 SDValue Lo64 =
1373 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1374
1375 Register HiReg = VA.getLocReg();
1376 Register LoReg = VA.getLocReg() + 1;
1377 if (!CLI.IsTailCall) {
1378 HiReg = toCallerWindow(HiReg);
1379 LoReg = toCallerWindow(LoReg);
1380 }
1381
1382 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1383 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1384 continue;
1385 }
1386
1387 // The custom bit on an i32 return value indicates that it should be
1388 // passed in the high bits of the register.
1389 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1390 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1391 DAG.getConstant(32, DL, MVT::i32));
1392
1393 // The next value may go in the low bits of the same register.
1394 // Handle both at once.
1395 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1396 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1397 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1398 CLI.OutVals[i+1]);
1399 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1400 // Skip the next value, it's already done.
1401 ++i;
1402 }
1403 }
1404
1405 Register Reg = VA.getLocReg();
1406 if (!CLI.IsTailCall)
1407 Reg = toCallerWindow(Reg);
1408 RegsToPass.push_back(std::make_pair(Reg, Arg));
1409 continue;
1410 }
1411
1412 assert(VA.isMemLoc());
1413
1414 // Create a store off the stack pointer for this argument.
1415 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1416 // The argument area starts at %fp+BIAS+128 in the callee frame,
1417 // %sp+BIAS+128 in ours.
1418 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1419 Subtarget->getStackPointerBias() +
1420 128, DL);
1421 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1422 MemOpChains.push_back(
1423 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1424 }
1425
1426 // Emit all stores, make sure they occur before the call.
1427 if (!MemOpChains.empty())
1428 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1429
1430 // Build a sequence of CopyToReg nodes glued together with token chain and
1431 // glue operands which copy the outgoing args into registers. The InGlue is
1432 // necessary since all emitted instructions must be stuck together in order
1433 // to pass the live physical registers.
1434 SDValue InGlue;
1435 for (const auto &[Reg, N] : RegsToPass) {
1436 Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
1437 InGlue = Chain.getValue(1);
1438 }
1439
1440 // If the callee is a GlobalAddress node (quite common, every direct call is)
1441 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1442 // Likewise ExternalSymbol -> TargetExternalSymbol.
1443 SDValue Callee = CLI.Callee;
1444 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1446 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
1448 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1449
1450 // Build the operands for the call instruction itself.
1452 Ops.push_back(Chain);
1453 Ops.push_back(Callee);
1454 for (const auto &[Reg, N] : RegsToPass)
1455 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1456
1457 // Add a register mask operand representing the call-preserved registers.
1458 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1459 const uint32_t *Mask =
1460 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1461 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1462 CLI.CallConv));
1463
1464 if (isAnyArgRegReserved(TRI, MF))
1466
1467 assert(Mask && "Missing call preserved mask for calling convention");
1468 Ops.push_back(DAG.getRegisterMask(Mask));
1469
1470 // Make sure the CopyToReg nodes are glued to the call instruction which
1471 // consumes the registers.
1472 if (InGlue.getNode())
1473 Ops.push_back(InGlue);
1474
1475 // Now the call itself.
1476 if (CLI.IsTailCall) {
1478 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1479 }
1480 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1481 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1482 InGlue = Chain.getValue(1);
1483
1484 // Revert the stack pointer immediately after the call.
1485 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1486 InGlue = Chain.getValue(1);
1487
1488 // Now extract the return values. This is more or less the same as
1489 // LowerFormalArguments_64.
1490
1491 // Assign locations to each value returned by this call.
1493 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1494 *DAG.getContext());
1495
1496 // Set inreg flag manually for codegen generated library calls that
1497 // return float.
1498 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1499 CLI.Ins[0].Flags.setInReg();
1500
1501 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1502
1503 // Copy all of the result registers out of their specified physreg.
1504 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1505 CCValAssign &VA = RVLocs[i];
1506 assert(VA.isRegLoc() && "Can only return in registers!");
1507 unsigned Reg = toCallerWindow(VA.getLocReg());
1508
1509 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1510 // reside in the same register in the high and low bits. Reuse the
1511 // CopyFromReg previous node to avoid duplicate copies.
1512 SDValue RV;
1513 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1514 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1515 RV = Chain.getValue(0);
1516
1517 // But usually we'll create a new CopyFromReg for a different register.
1518 if (!RV.getNode()) {
1519 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1520 Chain = RV.getValue(1);
1521 InGlue = Chain.getValue(2);
1522 }
1523
1524 // Get the high bits for i32 struct elements.
1525 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1526 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1527 DAG.getConstant(32, DL, MVT::i32));
1528
1529 // The callee promoted the return value, so insert an Assert?ext SDNode so
1530 // we won't promote the value again in this function.
1531 switch (VA.getLocInfo()) {
1532 case CCValAssign::SExt:
1533 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1534 DAG.getValueType(VA.getValVT()));
1535 break;
1536 case CCValAssign::ZExt:
1537 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1538 DAG.getValueType(VA.getValVT()));
1539 break;
1540 default:
1541 break;
1542 }
1543
1544 // Truncate the register down to the return value type.
1545 if (VA.isExtInLoc())
1546 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1547
1548 InVals.push_back(RV);
1549 }
1550
1551 return Chain;
1552}
1553
1554//===----------------------------------------------------------------------===//
1555// TargetLowering Implementation
1556//===----------------------------------------------------------------------===//
1557
1565
1566/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1567/// rcond condition.
1569 switch (CC) {
1570 default:
1571 llvm_unreachable("Unknown/unsigned integer condition code!");
1572 case ISD::SETEQ:
1573 return SPCC::REG_Z;
1574 case ISD::SETNE:
1575 return SPCC::REG_NZ;
1576 case ISD::SETLT:
1577 return SPCC::REG_LZ;
1578 case ISD::SETGT:
1579 return SPCC::REG_GZ;
1580 case ISD::SETLE:
1581 return SPCC::REG_LEZ;
1582 case ISD::SETGE:
1583 return SPCC::REG_GEZ;
1584 }
1585}
1586
1587/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1588/// condition.
1590 switch (CC) {
1591 default: llvm_unreachable("Unknown integer condition code!");
1592 case ISD::SETEQ: return SPCC::ICC_E;
1593 case ISD::SETNE: return SPCC::ICC_NE;
1594 case ISD::SETLT: return SPCC::ICC_L;
1595 case ISD::SETGT: return SPCC::ICC_G;
1596 case ISD::SETLE: return SPCC::ICC_LE;
1597 case ISD::SETGE: return SPCC::ICC_GE;
1598 case ISD::SETULT: return SPCC::ICC_CS;
1599 case ISD::SETULE: return SPCC::ICC_LEU;
1600 case ISD::SETUGT: return SPCC::ICC_GU;
1601 case ISD::SETUGE: return SPCC::ICC_CC;
1602 }
1603}
1604
1605/// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1606/// FCC condition.
1608 switch (CC) {
1609 default: llvm_unreachable("Unknown fp condition code!");
1610 case ISD::SETEQ:
1611 case ISD::SETOEQ: return SPCC::FCC_E;
1612 case ISD::SETNE:
1613 case ISD::SETUNE: return SPCC::FCC_NE;
1614 case ISD::SETLT:
1615 case ISD::SETOLT: return SPCC::FCC_L;
1616 case ISD::SETGT:
1617 case ISD::SETOGT: return SPCC::FCC_G;
1618 case ISD::SETLE:
1619 case ISD::SETOLE: return SPCC::FCC_LE;
1620 case ISD::SETGE:
1621 case ISD::SETOGE: return SPCC::FCC_GE;
1622 case ISD::SETULT: return SPCC::FCC_UL;
1623 case ISD::SETULE: return SPCC::FCC_ULE;
1624 case ISD::SETUGT: return SPCC::FCC_UG;
1625 case ISD::SETUGE: return SPCC::FCC_UGE;
1626 case ISD::SETUO: return SPCC::FCC_U;
1627 case ISD::SETO: return SPCC::FCC_O;
1628 case ISD::SETONE: return SPCC::FCC_LG;
1629 case ISD::SETUEQ: return SPCC::FCC_UE;
1630 }
1631}
1632
// NOTE(review): this constructor body is corrupted by the documentation
// extraction: the opening signature line and dozens of interior
// setOperationAction/setLoadExtAction lines were dropped (the bare numeric
// lines below are residue of the removed lines). Do NOT attempt to compile
// or restore this from memory -- recover the exact text from the upstream
// LLVM repository (llvm/lib/Target/Sparc/SparcISelLowering.cpp) instead.
// The visible structure: register-class setup, load/store legalization,
// f128/hard-quad handling, atomics, VIS3/Popc feature gating, and final
// computeRegisterProperties().
1634 const SparcSubtarget &STI)
1635 : TargetLowering(TM, STI), Subtarget(&STI) {
1636 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1637
1638 // Instructions which use registers as conditionals examine all the
1639 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1640 // matters much whether it's ZeroOrOneBooleanContent, or
1641 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1642 // former.
1645
1646 // Set up the register classes.
1647 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1648 if (!Subtarget->useSoftFloat()) {
1649 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1650 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1651 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1652 }
1653 if (Subtarget->is64Bit()) {
1654 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1655 } else {
1656 // On 32bit sparc, we define a double-register 32bit register
1657 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1658 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1659
1660 // ...but almost all operations must be expanded, so set that as
1661 // the default.
1662 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1663 setOperationAction(Op, MVT::v2i32, Expand);
1664 }
1665 // Truncating/extending stores/loads are also not supported.
1667 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1668 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1669 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1670
1671 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1672 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1673 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1674
1675 setTruncStoreAction(VT, MVT::v2i32, Expand);
1676 setTruncStoreAction(MVT::v2i32, VT, Expand);
1677 }
1678 // However, load and store *are* legal.
1679 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1680 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1683
1684 // And we need to promote i64 loads/stores into vector load/store
1687
1688 // Sadly, this doesn't work:
1689 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1690 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1691 }
1692
1693 // Turn FP extload into load/fpextend
1694 for (MVT VT : MVT::fp_valuetypes()) {
1695 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1696 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1697 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1698 }
1699
1700 // Sparc doesn't have i1 sign extending load
1701 for (MVT VT : MVT::integer_valuetypes())
1702 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1703
1704 // Turn FP truncstore into trunc + store.
1705 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1706 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1707 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1708 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1709 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1710 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1711
1712 // Custom legalize GlobalAddress nodes into LO/HI parts.
1717
1718 // Sparc doesn't have sext_inreg, replace them with shl/sra
1722
1723 // Sparc has no REM or DIVREM operations.
1728
1729 // ... nor does SparcV9.
1730 if (Subtarget->is64Bit()) {
1735 }
1736
1737 // Custom expand fp<->sint
1742
1743 // Custom Expand fp<->uint
1748
1749 // Lower f16 conversion operations into library calls
1756
1758 Subtarget->isVIS3() ? Legal : Expand);
1760 Subtarget->isVIS3() ? Legal : Expand);
1761
1762 // Sparc has no select or setcc: expand to SELECT_CC.
1767
1772
1773 // Sparc doesn't have BRCOND either, it has BR_CC.
1775 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1776 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1781
1786
1791
1792 if (Subtarget->isVIS3()) {
1795 }
1796
1797 if (Subtarget->is64Bit()) {
1799 Subtarget->isVIS3() ? Legal : Expand);
1801 Subtarget->isVIS3() ? Legal : Expand);
1806
1808 Subtarget->usePopc() ? Legal : Expand);
1810 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1811 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1813 }
1814
1815 // ATOMICs.
1816 // Atomics are supported on SparcV9. 32-bit atomics are also
1817 // supported by some Leon SparcV8 variants. Otherwise, atomics
1818 // are unsupported.
1819 if (Subtarget->isV9()) {
1820 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1821 // but it hasn't been implemented in the backend yet.
1822 if (Subtarget->is64Bit())
1824 else
1826 } else if (Subtarget->hasLeonCasa())
1828 else
1830
1832
1834
1836
1837 // Custom Lower Atomic LOAD/STORE
1840
1841 if (Subtarget->is64Bit()) {
1846 }
1847
1848 if (!Subtarget->isV9()) {
1849 // SparcV8 does not have FNEGD and FABSD.
1852 }
1853
1854 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1855 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1858 setOperationAction(ISD::FMA , MVT::f128, Expand);
1859 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1860 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1863 setOperationAction(ISD::FMA, MVT::f64,
1864 Subtarget->isUA2007() ? Legal : Expand);
1865 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1866 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1869 setOperationAction(ISD::FMA, MVT::f32,
1870 Subtarget->isUA2007() ? Legal : Expand);
1871 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1872 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1877 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1878 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1879 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1880
1884
1885 // Expands to [SU]MUL_LOHI.
1889
1890 if (Subtarget->useSoftMulDiv()) {
1891 // .umul works for both signed and unsigned
1896 }
1897
1898 if (Subtarget->is64Bit()) {
1902 Subtarget->isVIS3() ? Legal : Expand);
1904 Subtarget->isVIS3() ? Legal : Expand);
1905
1909 }
1910
1911 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1912 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1913 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1914 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1915
1916 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1918
1919 // Use the default implementation.
1920 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1921 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1926
1928
1930 Subtarget->usePopc() ? Legal : Expand);
1931
1932 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1933 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1934 setOperationAction(ISD::STORE, MVT::f128, Legal);
1935 } else {
1936 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1938 }
1939
1940 if (Subtarget->hasHardQuad()) {
1941 setOperationAction(ISD::FADD, MVT::f128, Legal);
1942 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1943 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1944 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1945 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1948 if (Subtarget->isV9()) {
1949 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1950 setOperationAction(ISD::FABS, MVT::f128, Legal);
1951 } else {
1952 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1953 setOperationAction(ISD::FABS, MVT::f128, Custom);
1954 }
1955 } else {
1956 // Custom legalize f128 operations.
1957
1958 setOperationAction(ISD::FADD, MVT::f128, Custom);
1959 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1960 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1961 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1963 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1964 setOperationAction(ISD::FABS, MVT::f128, Custom);
1965
1969 }
1970
1971 if (Subtarget->fixAllFDIVSQRT()) {
1972 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1973 // the former instructions generate errata on LEON processors.
1976 }
1977
1978 if (Subtarget->hasNoFMULS()) {
1980 }
1981
1982 // Custom combine bitcast between f64 and v2i32
1983 if (!Subtarget->is64Bit())
1985
1986 if (Subtarget->hasLeonCycleCounter())
1988
1989 if (Subtarget->isVIS3()) {
1994
1995 setOperationAction(ISD::CTTZ, MVT::i32,
1996 Subtarget->is64Bit() ? Promote : Expand);
1999 Subtarget->is64Bit() ? Promote : Expand);
2001 } else if (Subtarget->usePopc()) {
2006
2011 } else {
2015 Subtarget->is64Bit() ? Promote : LibCall);
2017
2018 // FIXME here we don't have any ISA extensions that could help us, so to
2019 // prevent large expansions those should be made into LibCalls.
2024 }
2025
2027
2028 // Some processors have no branch predictor and have pipelines longer than
2029 // what can be covered by the delay slot. This results in a stall, so mark
2030 // branches to be expensive on those processors.
2031 setJumpIsExpensive(Subtarget->hasNoPredictor());
2032 // The high cost of branching means that using conditional moves will
2033 // still be profitable even if the condition is predictable.
2035
2037
2038 computeRegisterProperties(Subtarget->getRegisterInfo());
2039}
2040
 // Body of SparcTargetLowering::useSoftFloat(); the signature line was lost
 // in this listing. Simply forwards the subtarget's soft-float flag.
2042 return Subtarget->useSoftFloat();
2043}
2044
 // Tail of getSetCCResultType: scalar setcc results are i32 on SPARC.
 // NOTE(review): the leading signature line and the vector-typed return
 // statement appear to be missing from this listing.
2046 EVT VT) const {
2047 if (!VT.isVector())
2048 return MVT::i32;
2050}
2051
2052/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
2053/// be zero. Op is expected to be a target specific node. Used by DAG
2054/// combiner.
2056 (const SDValue Op,
2057 KnownBits &Known,
2058 const APInt &DemandedElts,
2059 const SelectionDAG &DAG,
2060 unsigned Depth) const {
 // Start from "nothing known"; only the SELECT_* cases below refine Known.
2061 KnownBits Known2;
2062 Known.resetAll();
2063
2064 switch (Op.getOpcode()) {
2065 default: break;
 // A conditional move yields one of its two value operands (operands 0 and
 // 1), so a bit is known only when it is known identically in both.
2066 case SPISD::SELECT_ICC:
2067 case SPISD::SELECT_XCC:
2068 case SPISD::SELECT_FCC:
2069 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2070 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2071
2072 // Only known if known in both the LHS and RHS.
2073 Known = Known.intersectWith(Known2);
2074 break;
2075 }
2076}
2077
2078// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2079// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2081 ISD::CondCode CC, unsigned &SPCC) {
 // Match "(select_cc 1, 0, spcc, (cmp a, b)) != 0": such a select was
 // produced when an earlier setcc was lowered. Recover the original compare
 // operands and its SPARC condition code so callers can branch/select on
 // the compare directly instead of on the materialized 0/1 value.
2082 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2083 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2084 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2085 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2086 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2087 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2088 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2089 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
 // Operand 3 of the select is the compare node; operand 2 is its SPCC code.
2090 SDValue CMPCC = LHS.getOperand(3);
2091 SPCC = LHS.getConstantOperandVal(2);
2092 LHS = CMPCC.getOperand(0);
2093 RHS = CMPCC.getOperand(1);
2094 }
2095}
2096
2097// Convert to a target node and set target flags.
2099 SelectionDAG &DAG) const {
 // Re-emit the address operand as the corresponding Target* node carrying
 // the given target flags (relocation kind) TF.
 // NOTE(review): the dyn_cast guard lines that bind GA / CP / BA / ES from
 // Op appear to be missing from this listing — verify against upstream.
2101 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2102 SDLoc(GA),
2103 GA->getValueType(0),
2104 GA->getOffset(), TF);
2105
2107 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2108 CP->getAlign(), CP->getOffset(), TF);
2109
2111 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2112 Op.getValueType(),
2113 0,
2114 TF);
2115
2117 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2118 ES->getValueType(0), TF);
2119
 // Any other node kind is a caller bug.
2120 llvm_unreachable("Unhandled address SDNode");
2121}
2122
2123// Split Op into high and low parts according to HiTF and LoTF.
2124// Return an ADD node combining the parts.
2126 unsigned HiTF, unsigned LoTF,
2127 SelectionDAG &DAG) const {
 // Emit the %hi/%lo-style halves of the symbol, tagged with the relocation
 // kinds HiTF and LoTF, and combine them with an integer ADD.
2128 SDLoc DL(Op);
2129 EVT VT = Op.getValueType();
2130 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2131 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2132 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2133}
2134
2135// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2136// or ExternalSymbol SDNode.
2138 SDLoc DL(Op);
2139 EVT VT = getPointerTy(DAG.getDataLayout());
2140
2141 // Handle PIC mode first. SPARC needs a got load for every variable!
2142 if (isPositionIndependent()) {
2143 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2144 PICLevel::Level picLevel = M->getPICLevel();
2145 SDValue Idx;
2146
2147 if (picLevel == PICLevel::SmallPIC) {
2148 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2149 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2150 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2151 } else {
2152 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2153 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2154 }
2155
 // The final address is a load from GOT base + GOT index.
2156 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2157 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2158 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2159 // function has calls.
 // NOTE(review): the MachineFrameInfo declaration line and the final
 // MachinePointerInfo argument of getLoad appear to be missing from this
 // listing.
2161 MFI.setHasCalls(true);
2162 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2164 }
2165
2166 // This is one of the absolute code models.
2167 switch(getTargetMachine().getCodeModel()) {
2168 default:
2169 llvm_unreachable("Unsupported absolute code model");
2170 case CodeModel::Small:
2171 // abs32.
2172 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2173 case CodeModel::Medium: {
2174 // abs44.
 // 44-bit absolute: (H44 << 12) + L44 low 12 bits.
2175 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2176 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2177 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2178 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2179 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2180 }
2181 case CodeModel::Large: {
2182 // abs64.
 // 64-bit absolute: (high 32 bits << 32) + low 32 bits.
2183 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2184 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2185 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2186 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2187 }
2188 }
2189}
2190
2195
2200
2205
2207 SelectionDAG &DAG) const {
2208
 // NOTE(review): the lines binding GA (the GlobalAddressSDNode cast of Op)
 // and `model` (the TLS model chosen for GV) are missing from this listing.
2210 if (DAG.getTarget().useEmulatedTLS())
2211 return LowerToTLSEmulatedModel(GA, DAG);
2212
2213 SDLoc DL(GA);
2214 const GlobalValue *GV = GA->getGlobal();
2215 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2216
2218
 // General/Local Dynamic: materialize the GOT offset of the (module's) TLS
 // block descriptor and call __tls_get_addr, selecting GD or LDM flavored
 // relocations for each piece.
2219 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2220 unsigned HiTF =
2221 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2222 : ELF::R_SPARC_TLS_LDM_HI22);
2223 unsigned LoTF =
2224 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2225 : ELF::R_SPARC_TLS_LDM_LO10);
2226 unsigned addTF =
2227 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2228 : ELF::R_SPARC_TLS_LDM_ADD);
2229 unsigned callTF =
2230 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2231 : ELF::R_SPARC_TLS_LDM_CALL);
2232
2233 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2234 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2235 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2236 withTargetFlags(Op, addTF, DAG));
2237
 // Build the call sequence by hand: argument goes in %o0, result comes
 // back in %o0.
2238 SDValue Chain = DAG.getEntryNode();
2239 SDValue InGlue;
2240
2241 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2242 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2243 InGlue = Chain.getValue(1);
2244 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2245 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2246
2247 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
 // NOTE(review): the call-preserved-mask arguments (MF/CC) are on a line
 // missing from this listing.
2248 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2250 assert(Mask && "Missing call preserved mask for calling convention");
2251 SDValue Ops[] = {Chain,
2252 Callee,
2253 Symbol,
2254 DAG.getRegister(SP::O0, PtrVT),
2255 DAG.getRegisterMask(Mask),
2256 InGlue};
2257 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2258 InGlue = Chain.getValue(1);
2259 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2260 InGlue = Chain.getValue(1);
2261 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2262
2263 if (model != TLSModel::LocalDynamic)
2264 return Ret;
2265
 // Local Dynamic: add the variable's LDO offset (hix22/lox10 pair,
 // combined with XOR) to the module base returned by the call.
2266 SDValue Hi =
2267 DAG.getNode(SPISD::Hi, DL, PtrVT,
2268 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2269 SDValue Lo =
2270 DAG.getNode(SPISD::Lo, DL, PtrVT,
2271 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2272 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2273 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2274 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2275 }
2276
 // Initial Exec: load the thread-pointer offset from the GOT and add it to
 // %g7 (the thread pointer).
2277 if (model == TLSModel::InitialExec) {
2278 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2279 : ELF::R_SPARC_TLS_IE_LD);
2280
2281 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2282
2283 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2284 // function has calls.
 // NOTE(review): the MachineFrameInfo declaration line is missing from
 // this listing.
2286 MFI.setHasCalls(true);
2287
2288 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2289 ELF::R_SPARC_TLS_IE_LO10, DAG);
2290 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2291 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2292 DL, PtrVT, Ptr,
2293 withTargetFlags(Op, ldTF, DAG));
2294 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2295 DAG.getRegister(SP::G7, PtrVT), Offset,
2296 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2297 }
2298
 // Local Exec: the offset is a link-time constant (hix22/lox10 pair,
 // combined with XOR), added directly to the thread pointer %g7.
2299 assert(model == TLSModel::LocalExec);
2300 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2301 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2302 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2303 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2304 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2305
2306 return DAG.getNode(ISD::ADD, DL, PtrVT,
2307 DAG.getRegister(SP::G7, PtrVT), Offset);
2308}
2309
2311 ArgListTy &Args, SDValue Arg,
2312 const SDLoc &DL,
2313 SelectionDAG &DAG) const {
 // Append one argument for an fp128 libcall. f128 values are passed
 // indirectly: spill to a 16-byte stack slot and pass the pointer.
 // NOTE(review): the MachineFrameInfo (MFI) declaration line is missing
 // from this listing.
2315 EVT ArgVT = Arg.getValueType();
2316 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2317
2318 if (ArgTy->isFP128Ty()) {
2319 // Create a stack object and pass the pointer to the library function.
2320 int FI = MFI.CreateStackObject(16, Align(8), false);
2321 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2322 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2323 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2324 } else {
2325 Args.emplace_back(Arg, ArgTy);
2326 }
 // Return the updated chain so stores are ordered before the call.
2327 return Chain;
2328}
2329
2330SDValue
2332 const char *LibFuncName,
2333 unsigned numArgs) const {
2334
 // Lower an f128 operation as a call to the named soft-fp128 library
 // routine, taking the first numArgs operands of Op as call arguments.
2335 ArgListTy Args;
2336
 // NOTE(review): the MachineFrameInfo (MFI) declaration line is missing
 // from this listing.
2338 auto PtrVT = getPointerTy(DAG.getDataLayout());
2339
2340 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2341 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2342 Type *RetTyABI = RetTy;
2343 SDValue Chain = DAG.getEntryNode();
2344 SDValue RetPtr;
2345
2346 if (RetTy->isFP128Ty()) {
2347 // Create a Stack Object to receive the return value of type f128.
 // f128 results come back indirectly: pass a pointer to a 16-byte slot
 // as a hidden first argument and declare the ABI return type void.
2348 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2349 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2350 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2351 if (!Subtarget->is64Bit()) {
2352 Entry.IsSRet = true;
2353 Entry.IndirectType = RetTy;
2354 }
2355 Entry.IsReturned = false;
2356 Args.push_back(Entry);
2357 RetTyABI = Type::getVoidTy(*DAG.getContext());
2358 }
2359
2360 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2361 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2362 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2363 }
 // NOTE(review): the CallLoweringInfo (CLI) declaration line is missing
 // from this listing.
2365 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2366 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2367
2368 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2369
2370 // chain is in second result.
2371 if (RetTyABI == RetTy)
2372 return CallInfo.first;
2373
2374 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2375
2376 Chain = CallInfo.second;
2377
2378 // Load RetPtr to get the return value.
 // NOTE(review): the final MachinePointerInfo argument of getLoad is on a
 // line missing from this listing.
2379 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2381}
2382
2384 unsigned &SPCC, const SDLoc &DL,
2385 SelectionDAG &DAG) const {
2386
 // Lower an f128 compare without hardware quad support: call the soft-fp
 // comparison routine, then compare its integer result against a constant
 // and rewrite SPCC into the equivalent integer condition code.
2387 const char *LibCall = nullptr;
2388 bool is64Bit = Subtarget->is64Bit();
2389 switch(SPCC) {
2390 default: llvm_unreachable("Unhandled conditional code!");
2391 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2392 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2393 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2394 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2395 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2396 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
 // Unordered-aware conditions share the general comparison routine, whose
 // result encodes the full ordering.
2397 case SPCC::FCC_UL :
2398 case SPCC::FCC_ULE:
2399 case SPCC::FCC_UG :
2400 case SPCC::FCC_UGE:
2401 case SPCC::FCC_U :
2402 case SPCC::FCC_O :
2403 case SPCC::FCC_LG :
2404 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2405 }
2406
2407 auto PtrVT = getPointerTy(DAG.getDataLayout());
2408 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2409 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2410 ArgListTy Args;
2411 SDValue Chain = DAG.getEntryNode();
2412 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2413 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2414
 // NOTE(review): the CallLoweringInfo (CLI) declaration line is missing
 // from this listing.
2416 CLI.setDebugLoc(DL).setChain(Chain)
2417 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2418
2419 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2420
2421 // result is in first, and chain is in second result.
2422 SDValue Result = CallInfo.first;
2423
 // Map the libcall's integer result back onto an integer compare.
 // NOTE(review): several "SPCC = SPCC::ICC_*;" rewrite lines appear to be
 // missing from this listing (e.g. in the default, FCC_UL, FCC_ULE,
 // FCC_UGE, FCC_O and FCC_LG cases).
2424 switch(SPCC) {
2425 default: {
2426 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2428 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2429 }
2430 case SPCC::FCC_UL : {
2431 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2432 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2433 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2435 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2436 }
2437 case SPCC::FCC_ULE: {
2438 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2440 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2441 }
2442 case SPCC::FCC_UG : {
2443 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2444 SPCC = SPCC::ICC_G;
2445 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2446 }
2447 case SPCC::FCC_UGE: {
2448 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2450 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2451 }
2452
2453 case SPCC::FCC_U : {
2454 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2455 SPCC = SPCC::ICC_E;
2456 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2457 }
2458 case SPCC::FCC_O : {
2459 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2461 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2462 }
2463 case SPCC::FCC_LG : {
2464 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2465 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2466 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2468 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2469 }
2470 case SPCC::FCC_UE : {
2471 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2472 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2473 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2474 SPCC = SPCC::ICC_E;
2475 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2476 }
2477 }
2478}
2479
static SDValue
 // Lower fpextend-to-f128 as the matching FPEXT libcall (from f64 or f32).
2482 const SparcTargetLowering &TLI) {
2483
2484 if (Op.getOperand(0).getValueType() == MVT::f64)
2485 return TLI.LowerF128Op(Op, DAG,
2486 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2487
2488 if (Op.getOperand(0).getValueType() == MVT::f32)
2489 return TLI.LowerF128Op(Op, DAG,
2490 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2491
2492 llvm_unreachable("fpextend with non-float operand!");
2493 return SDValue();
2494}
2495
static SDValue
 // Lower fpround-from-f128 as the matching FPROUND libcall (to f64 or f32).
2498 const SparcTargetLowering &TLI) {
2499 // FP_ROUND on f64 and f32 are legal.
2500 if (Op.getOperand(0).getValueType() != MVT::f128)
2501 return Op;
2502
2503 if (Op.getValueType() == MVT::f64)
2504 return TLI.LowerF128Op(Op, DAG,
2505 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2506 if (Op.getValueType() == MVT::f32)
2507 return TLI.LowerF128Op(Op, DAG,
2508 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2509
2510 llvm_unreachable("fpround to non-float!");
2511 return SDValue();
2512}
2513
2515 const SparcTargetLowering &TLI,
2516 bool hasHardQuad) {
2517 SDLoc dl(Op);
2518 EVT VT = Op.getValueType();
2519 assert(VT == MVT::i32 || VT == MVT::i64);
2520
2521 // Expand f128 operations to fp128 abi calls.
2522 if (Op.getOperand(0).getValueType() == MVT::f128
2523 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2524 const char *libName = TLI.getLibcallName(VT == MVT::i32
2525 ? RTLIB::FPTOSINT_F128_I32
2526 : RTLIB::FPTOSINT_F128_I64);
2527 return TLI.LowerF128Op(Op, DAG, libName, 1);
2528 }
2529
2530 // Expand if the resulting type is illegal.
2531 if (!TLI.isTypeLegal(VT))
2532 return SDValue();
2533
2534 // Otherwise, Convert the fp value to integer in an FP register.
 // FTOI/FTOX leave the integer bits in an f32/f64 register; the bitcast
 // moves them to the integer register file.
2535 if (VT == MVT::i32)
2536 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2537 else
2538 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2539
2540 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2541}
2542
2544 const SparcTargetLowering &TLI,
2545 bool hasHardQuad) {
2546 SDLoc dl(Op);
2547 EVT OpVT = Op.getOperand(0).getValueType();
2548 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2549
 // FP register type wide enough to hold the raw integer bits.
2550 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2551
2552 // Expand f128 operations to fp128 ABI calls.
2553 if (Op.getValueType() == MVT::f128
2554 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2555 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2556 ? RTLIB::SINTTOFP_I32_F128
2557 : RTLIB::SINTTOFP_I64_F128);
2558 return TLI.LowerF128Op(Op, DAG, libName, 1);
2559 }
2560
2561 // Expand if the operand type is illegal.
2562 if (!TLI.isTypeLegal(OpVT))
2563 return SDValue();
2564
2565 // Otherwise, Convert the int value to FP in an FP register.
 // Bitcast moves the integer into the FP register file; ITOF/XTOF do the
 // actual conversion there.
2566 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2567 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2568 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2569}
2570
2572 const SparcTargetLowering &TLI,
2573 bool hasHardQuad) {
2574 EVT VT = Op.getValueType();
2575
2576 // Expand if it does not involve f128 or the target has support for
2577 // quad floating point instructions and the resulting type is legal.
 // Returning SDValue() tells the legalizer to use its default expansion.
2578 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2579 (hasHardQuad && TLI.isTypeLegal(VT)))
2580 return SDValue();
2581
2582 assert(VT == MVT::i32 || VT == MVT::i64);
2583
 // Otherwise emit the fp128 -> unsigned integer libcall.
2584 return TLI.LowerF128Op(Op, DAG,
2585 TLI.getLibcallName(VT == MVT::i32
2586 ? RTLIB::FPTOUINT_F128_I32
2587 : RTLIB::FPTOUINT_F128_I64),
2588 1);
2589}
2590
2592 const SparcTargetLowering &TLI,
2593 bool hasHardQuad) {
2594 EVT OpVT = Op.getOperand(0).getValueType();
2595 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2596
2597 // Expand if it does not involve f128 or the target has support for
2598 // quad floating point instructions and the operand type is legal.
 // Returning SDValue() tells the legalizer to use its default expansion.
2599 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2600 return SDValue();
2601
 // Otherwise emit the unsigned integer -> fp128 libcall.
2602 return TLI.LowerF128Op(Op, DAG,
2603 TLI.getLibcallName(OpVT == MVT::i32
2604 ? RTLIB::UINTTOFP_I32_F128
2605 : RTLIB::UINTTOFP_I64_F128),
2606 1);
2607}
2608
2610 const SparcTargetLowering &TLI, bool hasHardQuad,
2611 bool isV9, bool is64Bit) {
2612 SDValue Chain = Op.getOperand(0);
2613 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2614 SDValue LHS = Op.getOperand(2);
2615 SDValue RHS = Op.getOperand(3);
2616 SDValue Dest = Op.getOperand(4);
2617 SDLoc dl(Op);
2618 unsigned Opc, SPCC = ~0U;
2619
2620 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2621 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
 // NOTE(review): the LookThroughSetCC(LHS, RHS, CC, SPCC) call line is
 // missing from this listing.
2623 assert(LHS.getValueType() == RHS.getValueType());
2624
2625 // Get the condition flag.
2626 SDValue CompareFlag;
2627 if (LHS.getValueType().isInteger()) {
2628 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2629 // and the RHS is zero we might be able to use a specialized branch.
 // NOTE(review): the remainder of this condition (RHS-is-zero and
 // signedness checks) is on a line missing from this listing.
2630 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2632 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2633 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2634 LHS);
2635
2636 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2637 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2638 if (isV9)
2639 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2640 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2641 else
2642 // Non-v9 targets don't have xcc.
2643 Opc = SPISD::BRICC;
2644 } else {
2645 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
 // Soft f128 compare: the libcall leaves an integer-compare flag, so
 // branch on the integer condition codes.
2646 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2647 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2648 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2649 } else {
2650 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2651 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2652 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2653 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2654 }
2655 }
2656 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2657 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2658}
2659
2661 const SparcTargetLowering &TLI, bool hasHardQuad,
2662 bool isV9, bool is64Bit) {
2663 SDValue LHS = Op.getOperand(0);
2664 SDValue RHS = Op.getOperand(1);
2665 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2666 SDValue TrueVal = Op.getOperand(2);
2667 SDValue FalseVal = Op.getOperand(3);
2668 SDLoc dl(Op);
2669 unsigned Opc, SPCC = ~0U;
2670
2671 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2672 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
 // NOTE(review): the LookThroughSetCC(LHS, RHS, CC, SPCC) call line is
 // missing from this listing.
2674 assert(LHS.getValueType() == RHS.getValueType());
2675
2676 SDValue CompareFlag;
2677 if (LHS.getValueType().isInteger()) {
2678 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2679 // and the RHS is zero we might be able to use a specialized select.
2680 // All SELECT_CC between any two scalar integer types are eligible for
2681 // lowering to specialized instructions. Additionally, f32 and f64 types
2682 // are also eligible, but for f128 we can only use the specialized
2683 // instruction when we have hardquad.
2684 EVT ValType = TrueVal.getValueType();
2685 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2686 ValType == MVT::f64 ||
2687 (ValType == MVT::f128 && hasHardQuad);
2688 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2689 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2690 return DAG.getNode(
2691 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2692 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2693
2694 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2695 Opc = LHS.getValueType() == MVT::i32 ?
2696 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2697 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2698 } else {
2699 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
 // Soft f128 compare: the libcall leaves an integer-compare flag, so
 // select on the integer condition codes.
2700 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2701 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2702 Opc = SPISD::SELECT_ICC;
2703 } else {
2704 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2705 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2706 Opc = SPISD::SELECT_FCC;
2707 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2708 }
2709 }
2710 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2711 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2712}
2713
2715 const SparcTargetLowering &TLI) {
 // NOTE(review): the lines declaring the MachineFunction and the
 // SparcMachineFunctionInfo (FuncInfo) are missing from this listing.
2718 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2719
2720 // Need frame address to find the address of VarArgsFrameIndex.
2722
2723 // vastart just stores the address of the VarArgsFrameIndex slot into the
2724 // memory location argument.
 // The va_list slot address is frame pointer (%i6) plus the function's
 // recorded varargs frame offset.
2725 SDLoc DL(Op);
2726 SDValue Offset =
2727 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2728 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2729 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2730 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2731 MachinePointerInfo(SV));
2732}
2733
 // Lower va_arg: load the current va_list pointer, bump it past one
 // argument slot, store it back, then load the argument itself.
2735 SDNode *Node = Op.getNode();
2736 EVT VT = Node->getValueType(0);
2737 SDValue InChain = Node->getOperand(0);
2738 SDValue VAListPtr = Node->getOperand(1);
2739 EVT PtrVT = VAListPtr.getValueType();
2740 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2741 SDLoc DL(Node);
2742 SDValue VAList =
2743 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2744 // Increment the pointer, VAList, to the next vaarg.
 // NOTE(review): the increment-amount operand of this ADD is on a line
 // missing from this listing.
2745 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2747 DL));
2748 // Store the incremented VAList to the legalized pointer.
2749 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2750 MachinePointerInfo(SV));
2751 // Load the actual argument out of the pointer VAList.
2752 // We can't count on greater alignment than the word size.
2753 return DAG.getLoad(
2754 VT, DL, InChain, VAList, MachinePointerInfo(),
2755 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2756}
2757
2759 const SparcSubtarget &Subtarget) {
 // Computes the address of the first usable byte of the current stack
 // frame: unbiased %sp, past the register save area (16 windowed registers)
 // and, on 32-bit, past the 4-byte struct-return-address slot. Returns
 // {address, chain}. (Signature line lost in this listing — presumably the
 // lowering for a stack-address-style node; TODO confirm against upstream.)
2760 SDValue Chain = Op.getOperand(0);
2761 EVT VT = Op->getValueType(0);
2762 SDLoc DL(Op);
2763
2764 MCRegister SPReg = SP::O6;
2765 SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
2766
2767 // Unbias the stack pointer register.
2768 unsigned OffsetToStackStart = Subtarget.getStackPointerBias();
2769 // Move past the register save area: 8 in registers + 8 local registers.
2770 OffsetToStackStart += 16 * (Subtarget.is64Bit() ? 8 : 4);
2771 // Move past the struct return address slot (4 bytes) on SPARC 32-bit.
2772 if (!Subtarget.is64Bit())
2773 OffsetToStackStart += 4;
2774
2775 SDValue StackAddr = DAG.getNode(ISD::ADD, DL, VT, SP,
2776 DAG.getConstant(OffsetToStackStart, DL, VT));
2777 return DAG.getMergeValues({StackAddr, Chain}, DL);
2778}
2779
2781 const SparcSubtarget *Subtarget) {
 // Allocate Size bytes on the stack, optionally over-aligned, by moving
 // %sp down. Returns {aligned pointer, new chain}.
2782 SDValue Chain = Op.getOperand(0);
2783 SDValue Size = Op.getOperand(1);
2784 SDValue Alignment = Op.getOperand(2);
2785 MaybeAlign MaybeAlignment =
2786 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2787 EVT VT = Size->getValueType(0);
2788 SDLoc dl(Op);
2789
2790 unsigned SPReg = SP::O6;
2791 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2792
2793 // The resultant pointer needs to be above the register spill area
2794 // at the bottom of the stack.
2795 unsigned regSpillArea;
2796 if (Subtarget->is64Bit()) {
2797 regSpillArea = 128;
2798 } else {
2799 // On Sparc32, the size of the spill area is 92. Unfortunately,
2800 // that's only 4-byte aligned, not 8-byte aligned (the stack
2801 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2802 // aligned dynamic allocation, we actually need to add 96 to the
2803 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2804
2805 // That also means adding 4 to the size of the allocation --
2806 // before applying the 8-byte rounding. Unfortunately, we the
2807 // value we get here has already had rounding applied. So, we need
2808 // to add 8, instead, wasting a bit more memory.
2809
2810 // Further, this only actually needs to be done if the required
2811 // alignment is > 4, but, we've lost that info by this point, too,
2812 // so we always apply it.
2813
2814 // (An alternative approach would be to always reserve 96 bytes
2815 // instead of the required 92, but then we'd waste 4 extra bytes
2816 // in every frame, not just those with dynamic stack allocations)
2817
2818 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2819
2820 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2821 DAG.getConstant(8, dl, VT));
2822 regSpillArea = 96;
2823 }
2824
2825 int64_t Bias = Subtarget->getStackPointerBias();
2826
2827 // Debias and increment SP past the reserved spill area.
2828 // We need the SP to point to the first usable region before calculating
2829 // anything to prevent any of the pointers from becoming out of alignment when
2830 // we rebias the SP later on.
2831 SDValue StartOfUsableStack = DAG.getNode(
2832 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2833 SDValue AllocatedPtr =
2834 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2835
 // Over-aligned request: round the allocation down to the requested
 // alignment by masking off the low bits.
2836 bool IsOveraligned = MaybeAlignment.has_value();
2837 SDValue AlignedPtr =
2838 IsOveraligned
2839 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2840 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2841 : AllocatedPtr;
2842
2843 // Now that we are done, restore the bias and reserved spill area.
2844 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2845 DAG.getConstant(regSpillArea + Bias, dl, VT));
2846 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2847 SDValue Ops[2] = {AlignedPtr, Chain};
2848 return DAG.getMergeValues(Ops, dl);
2849}
2850
2851
 // Emit a FLUSHW node (flush register windows to the stack); signature line
 // lost in this listing.
2853 SDLoc dl(Op);
2854 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2855 dl, MVT::Other, DAG.getEntryNode());
2856 return Chain;
2857}
2858
2860 const SparcSubtarget *Subtarget,
2861 bool AlwaysFlush = false) {
 // Walk `depth` frames up via the saved-%i6 chain and return that frame's
 // address. NOTE(review): the signature line and the MachineFrameInfo (MFI)
 // declaration line are missing from this listing.
2863 MFI.setFrameAddressIsTaken(true);
2864
2865 EVT VT = Op.getValueType();
2866 SDLoc dl(Op);
2867 unsigned FrameReg = SP::I6;
2868 unsigned stackBias = Subtarget->getStackPointerBias();
2869
2870 SDValue FrameAddr;
2871 SDValue Chain;
2872
2873 // flush first to make sure the windowed registers' values are in stack
2874 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2875
2876 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2877
 // Offset of the saved frame pointer within a frame's register save area.
2878 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2879
2880 while (depth--) {
2881 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2882 DAG.getIntPtrConstant(Offset, dl));
2883 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2884 }
 // 64-bit frames store a biased pointer; unbias the final result.
2885 if (Subtarget->is64Bit())
2886 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2887 DAG.getIntPtrConstant(stackBias, dl));
2888 return FrameAddr;
2889}
2890
2891
2893 const SparcSubtarget *Subtarget) {
2894
 // Operand 0 of FRAMEADDR is the constant frame depth to walk up.
2895 uint64_t depth = Op.getConstantOperandVal(0);
2896
2897 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2898
2899}
2900
2902 const SparcTargetLowering &TLI,
2903 const SparcSubtarget *Subtarget) {
 // NOTE(review): the MachineFunction (MF) declaration line is missing from
 // this listing.
2905 MachineFrameInfo &MFI = MF.getFrameInfo();
2906 MFI.setReturnAddressIsTaken(true);
2907
2908 EVT VT = Op.getValueType();
2909 SDLoc dl(Op);
2910 uint64_t depth = Op.getConstantOperandVal(0);
2911
2912 SDValue RetAddr;
2913 if (depth == 0) {
 // Current function's return address is simply live-in %i7.
2914 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2915 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2916 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2917 return RetAddr;
2918 }
2919
2920 // Need frame address to find return address of the caller.
 // AlwaysFlush: the windows must be spilled before reading saved %i7.
2921 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2922
 // Offset of the saved return address within that frame's save area.
2923 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2924 SDValue Ptr = DAG.getNode(ISD::ADD,
2925 dl, VT,
2926 FrameAddr,
2927 DAG.getIntPtrConstant(Offset, dl));
2928 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2929
2930 return RetAddr;
2931}
2932
2933static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2934 unsigned opcode) {
2935 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2936 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2937
2938 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2939 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2940 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2941
2942 // Note: in little-endian, the floating-point value is stored in the
2943 // registers are in the opposite order, so the subreg with the sign
2944 // bit is the highest-numbered (odd), rather than the
2945 // lowest-numbered (even).
2946
2947 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2948 SrcReg64);
2949 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2950 SrcReg64);
2951
2952 if (DAG.getDataLayout().isLittleEndian())
2953 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2954 else
2955 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2956
2957 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2958 dl, MVT::f64), 0);
2959 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2960 DstReg64, Hi32);
2961 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2962 DstReg64, Lo32);
2963 return DstReg64;
2964}
2965
2966// Lower a f128 load into two f64 loads.
2968{
2969 SDLoc dl(Op);
2970 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2971 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2972
2973 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2974
2975 SDValue Hi64 =
2976 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2977 LdNode->getPointerInfo(), Alignment);
2978 EVT addrVT = LdNode->getBasePtr().getValueType();
2979 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2980 LdNode->getBasePtr(),
2981 DAG.getConstant(8, dl, addrVT));
2982 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2983 LdNode->getPointerInfo().getWithOffset(8),
2984 Alignment);
2985
2986 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2987 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2988
2989 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2990 dl, MVT::f128);
2991 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2992 MVT::f128,
2993 SDValue(InFP128, 0),
2994 Hi64,
2995 SubRegEven);
2996 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2997 MVT::f128,
2998 SDValue(InFP128, 0),
2999 Lo64,
3000 SubRegOdd);
3001 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
3002 SDValue(Lo64.getNode(), 1) };
3003 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3004 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
3005 return DAG.getMergeValues(Ops, dl);
3006}
3007
3009{
3010 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
3011
3012 EVT MemVT = LdNode->getMemoryVT();
3013 if (MemVT == MVT::f128)
3014 return LowerF128Load(Op, DAG);
3015
3016 return Op;
3017}
3018
3019// Lower a f128 store into two f64 stores.
3021 SDLoc dl(Op);
3022 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
3023 assert(StNode->getOffset().isUndef() && "Unexpected node type");
3024
3025 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
3026 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
3027
3028 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3029 dl,
3030 MVT::f64,
3031 StNode->getValue(),
3032 SubRegEven);
3033 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3034 dl,
3035 MVT::f64,
3036 StNode->getValue(),
3037 SubRegOdd);
3038
3039 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
3040
3041 SDValue OutChains[2];
3042 OutChains[0] =
3043 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
3044 StNode->getBasePtr(), StNode->getPointerInfo(),
3045 Alignment);
3046 EVT addrVT = StNode->getBasePtr().getValueType();
3047 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3048 StNode->getBasePtr(),
3049 DAG.getConstant(8, dl, addrVT));
3050 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3051 StNode->getPointerInfo().getWithOffset(8),
3052 Alignment);
3053 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3054}
3055
3057{
3058 SDLoc dl(Op);
3059 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3060
3061 EVT MemVT = St->getMemoryVT();
3062 if (MemVT == MVT::f128)
3063 return LowerF128Store(Op, DAG);
3064
3065 if (MemVT == MVT::i64) {
3066 // Custom handling for i64 stores: turn it into a bitcast and a
3067 // v2i32 store.
3068 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3069 SDValue Chain = DAG.getStore(
3070 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3071 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3072 return Chain;
3073 }
3074
3075 return SDValue();
3076}
3077
3079 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3080 && "invalid opcode");
3081
3082 SDLoc dl(Op);
3083
3084 if (Op.getValueType() == MVT::f64)
3085 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3086 if (Op.getValueType() != MVT::f128)
3087 return Op;
3088
3089 // Lower fabs/fneg on f128 to fabs/fneg on f64
3090 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3091 // (As with LowerF64Op, on little-endian, we need to negate the odd
3092 // subreg)
3093
3094 SDValue SrcReg128 = Op.getOperand(0);
3095 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3096 SrcReg128);
3097 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3098 SrcReg128);
3099
3100 if (DAG.getDataLayout().isLittleEndian()) {
3101 if (isV9)
3102 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3103 else
3104 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3105 } else {
3106 if (isV9)
3107 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3108 else
3109 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3110 }
3111
3112 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3113 dl, MVT::f128), 0);
3114 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3115 DstReg128, Hi64);
3116 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3117 DstReg128, Lo64);
3118 return DstReg128;
3119}
3120
3122 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3123 // Expand with a fence.
3124 return SDValue();
3125 }
3126
3127 // Monotonic load/stores are legal.
3128 return Op;
3129}
3130
3132 SelectionDAG &DAG) const {
3133 unsigned IntNo = Op.getConstantOperandVal(0);
3134 switch (IntNo) {
3135 default: return SDValue(); // Don't custom lower most intrinsics.
3136 case Intrinsic::thread_pointer: {
3137 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3138 return DAG.getRegister(SP::G7, PtrVT);
3139 }
3140 }
3141}
3142
3145
3146 bool hasHardQuad = Subtarget->hasHardQuad();
3147 bool isV9 = Subtarget->isV9();
3148 bool is64Bit = Subtarget->is64Bit();
3149
3150 switch (Op.getOpcode()) {
3151 default: llvm_unreachable("Should not custom lower this!");
3152
3153 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3154 Subtarget);
3155 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3156 Subtarget);
3158 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3159 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3160 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3161 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3162 hasHardQuad);
3163 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3164 hasHardQuad);
3165 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3166 hasHardQuad);
3167 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3168 hasHardQuad);
3169 case ISD::BR_CC:
3170 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3171 case ISD::SELECT_CC:
3172 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3173 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3174 case ISD::VAARG: return LowerVAARG(Op, DAG);
3176 Subtarget);
3177 case ISD::STACKADDRESS:
3178 return LowerSTACKADDRESS(Op, DAG, *Subtarget);
3179
3180 case ISD::LOAD: return LowerLOAD(Op, DAG);
3181 case ISD::STORE: return LowerSTORE(Op, DAG);
3182 case ISD::FADD: return LowerF128Op(Op, DAG,
3183 getLibcallName(RTLIB::ADD_F128), 2);
3184 case ISD::FSUB: return LowerF128Op(Op, DAG,
3185 getLibcallName(RTLIB::SUB_F128), 2);
3186 case ISD::FMUL: return LowerF128Op(Op, DAG,
3187 getLibcallName(RTLIB::MUL_F128), 2);
3188 case ISD::FDIV: return LowerF128Op(Op, DAG,
3189 getLibcallName(RTLIB::DIV_F128), 2);
3190 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3191 getLibcallName(RTLIB::SQRT_F128),1);
3192 case ISD::FABS:
3193 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3194 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3195 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3196 case ISD::ATOMIC_LOAD:
3197 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3199 }
3200}
3201
3203 const SDLoc &DL,
3204 SelectionDAG &DAG) const {
3205 APInt V = C->getValueAPF().bitcastToAPInt();
3206 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3207 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3208 if (DAG.getDataLayout().isLittleEndian())
3209 std::swap(Lo, Hi);
3210 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3211}
3212
3214 DAGCombinerInfo &DCI) const {
3215 SDLoc dl(N);
3216 SDValue Src = N->getOperand(0);
3217
3218 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3219 Src.getSimpleValueType() == MVT::f64)
3221
3222 return SDValue();
3223}
3224
3226 DAGCombinerInfo &DCI) const {
3227 switch (N->getOpcode()) {
3228 default:
3229 break;
3230 case ISD::BITCAST:
3231 return PerformBITCASTCombine(N, DCI);
3232 }
3233 return SDValue();
3234}
3235
3238 MachineBasicBlock *BB) const {
3239 switch (MI.getOpcode()) {
3240 default: llvm_unreachable("Unknown SELECT_CC!");
3241 case SP::SELECT_CC_Int_ICC:
3242 case SP::SELECT_CC_FP_ICC:
3243 case SP::SELECT_CC_DFP_ICC:
3244 case SP::SELECT_CC_QFP_ICC:
3245 if (Subtarget->isV9())
3246 return expandSelectCC(MI, BB, SP::BPICC);
3247 return expandSelectCC(MI, BB, SP::BCOND);
3248 case SP::SELECT_CC_Int_XCC:
3249 case SP::SELECT_CC_FP_XCC:
3250 case SP::SELECT_CC_DFP_XCC:
3251 case SP::SELECT_CC_QFP_XCC:
3252 return expandSelectCC(MI, BB, SP::BPXCC);
3253 case SP::SELECT_CC_Int_FCC:
3254 case SP::SELECT_CC_FP_FCC:
3255 case SP::SELECT_CC_DFP_FCC:
3256 case SP::SELECT_CC_QFP_FCC:
3257 if (Subtarget->isV9())
3258 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3259 return expandSelectCC(MI, BB, SP::FBCOND);
3260 }
3261}
3262
3265 unsigned BROpcode) const {
3266 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3267 DebugLoc dl = MI.getDebugLoc();
3268 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3269
3270 // To "insert" a SELECT_CC instruction, we actually have to insert the
3271 // triangle control-flow pattern. The incoming instruction knows the
3272 // destination vreg to set, the condition code register to branch on, the
3273 // true/false values to select between, and the condition code for the branch.
3274 //
3275 // We produce the following control flow:
3276 // ThisMBB
3277 // | \
3278 // | IfFalseMBB
3279 // | /
3280 // SinkMBB
3281 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3283
3284 MachineBasicBlock *ThisMBB = BB;
3285 MachineFunction *F = BB->getParent();
3286 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3287 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3288 F->insert(It, IfFalseMBB);
3289 F->insert(It, SinkMBB);
3290
3291 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3292 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3293 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3294 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3295
3296 // Set the new successors for ThisMBB.
3297 ThisMBB->addSuccessor(IfFalseMBB);
3298 ThisMBB->addSuccessor(SinkMBB);
3299
3300 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3301 .addMBB(SinkMBB)
3302 .addImm(CC);
3303
3304 // IfFalseMBB just falls through to SinkMBB.
3305 IfFalseMBB->addSuccessor(SinkMBB);
3306
3307 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3308 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3309 MI.getOperand(0).getReg())
3310 .addReg(MI.getOperand(1).getReg())
3311 .addMBB(ThisMBB)
3312 .addReg(MI.getOperand(2).getReg())
3313 .addMBB(IfFalseMBB);
3314
3315 MI.eraseFromParent(); // The pseudo instruction is gone now.
3316 return SinkMBB;
3317}
3318
3319//===----------------------------------------------------------------------===//
3320// Sparc Inline Assembly Support
3321//===----------------------------------------------------------------------===//
3322
3323/// getConstraintType - Given a constraint letter, return the type of
3324/// constraint it is for this target.
3327 if (Constraint.size() == 1) {
3328 switch (Constraint[0]) {
3329 default: break;
3330 case 'r':
3331 case 'f':
3332 case 'e':
3333 return C_RegisterClass;
3334 case 'I': // SIMM13
3335 return C_Immediate;
3336 }
3337 }
3338
3339 return TargetLowering::getConstraintType(Constraint);
3340}
3341
3344 const char *constraint) const {
3346 Value *CallOperandVal = info.CallOperandVal;
3347 // If we don't have a value, we can't do a match,
3348 // but allow it at the lowest weight.
3349 if (!CallOperandVal)
3350 return CW_Default;
3351
3352 // Look at the constraint type.
3353 switch (*constraint) {
3354 default:
3356 break;
3357 case 'I': // SIMM13
3358 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3359 if (isInt<13>(C->getSExtValue()))
3360 weight = CW_Constant;
3361 }
3362 break;
3363 }
3364 return weight;
3365}
3366
3367/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3368/// vector. If it is invalid, don't add anything to Ops.
3370 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3371 SelectionDAG &DAG) const {
3372 SDValue Result;
3373
3374 // Only support length 1 constraints for now.
3375 if (Constraint.size() > 1)
3376 return;
3377
3378 char ConstraintLetter = Constraint[0];
3379 switch (ConstraintLetter) {
3380 default: break;
3381 case 'I':
3383 if (isInt<13>(C->getSExtValue())) {
3384 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3385 Op.getValueType());
3386 break;
3387 }
3388 return;
3389 }
3390 }
3391
3392 if (Result.getNode()) {
3393 Ops.push_back(Result);
3394 return;
3395 }
3397}
3398
3399std::pair<unsigned, const TargetRegisterClass *>
3401 StringRef Constraint,
3402 MVT VT) const {
3403 if (Constraint.empty())
3404 return std::make_pair(0U, nullptr);
3405
3406 if (Constraint.size() == 1) {
3407 switch (Constraint[0]) {
3408 case 'r':
3409 if (VT == MVT::v2i32)
3410 return std::make_pair(0U, &SP::IntPairRegClass);
3411 else if (Subtarget->is64Bit())
3412 return std::make_pair(0U, &SP::I64RegsRegClass);
3413 else
3414 return std::make_pair(0U, &SP::IntRegsRegClass);
3415 case 'f':
3416 if (VT == MVT::f32 || VT == MVT::i32)
3417 return std::make_pair(0U, &SP::FPRegsRegClass);
3418 else if (VT == MVT::f64 || VT == MVT::i64)
3419 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3420 else if (VT == MVT::f128)
3421 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3422 // This will generate an error message
3423 return std::make_pair(0U, nullptr);
3424 case 'e':
3425 if (VT == MVT::f32 || VT == MVT::i32)
3426 return std::make_pair(0U, &SP::FPRegsRegClass);
3427 else if (VT == MVT::f64 || VT == MVT::i64 )
3428 return std::make_pair(0U, &SP::DFPRegsRegClass);
3429 else if (VT == MVT::f128)
3430 return std::make_pair(0U, &SP::QFPRegsRegClass);
3431 // This will generate an error message
3432 return std::make_pair(0U, nullptr);
3433 }
3434 }
3435
3436 if (Constraint.front() != '{')
3437 return std::make_pair(0U, nullptr);
3438
3439 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3440 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3441 if (RegName.empty())
3442 return std::make_pair(0U, nullptr);
3443
3444 unsigned long long RegNo;
3445 // Handle numbered register aliases.
3446 if (RegName[0] == 'r' &&
3447 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3448 // r0-r7 -> g0-g7
3449 // r8-r15 -> o0-o7
3450 // r16-r23 -> l0-l7
3451 // r24-r31 -> i0-i7
3452 if (RegNo > 31)
3453 return std::make_pair(0U, nullptr);
3454 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3455 char RegType = RegTypes[RegNo / 8];
3456 char RegIndex = '0' + (RegNo % 8);
3457 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3458 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3459 }
3460
3461 // Rewrite the fN constraint according to the value type if needed.
3462 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3463 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3464 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3466 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3467 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3469 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3470 } else {
3471 return std::make_pair(0U, nullptr);
3472 }
3473 }
3474
3475 auto ResultPair =
3477 if (!ResultPair.second)
3478 return std::make_pair(0U, nullptr);
3479
3480 // Force the use of I64Regs over IntRegs for 64-bit values.
3481 if (Subtarget->is64Bit() && VT == MVT::i64) {
3482 assert(ResultPair.second == &SP::IntRegsRegClass &&
3483 "Unexpected register class");
3484 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3485 }
3486
3487 return ResultPair;
3488}
3489
3490bool
3492 // The Sparc target isn't yet aware of offsets.
3493 return false;
3494}
3495
3498 SelectionDAG &DAG) const {
3499
3500 SDLoc dl(N);
3501
3502 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3503
3504 switch (N->getOpcode()) {
3505 default:
3506 llvm_unreachable("Do not know how to custom type legalize this operation!");
3507
3508 case ISD::FP_TO_SINT:
3509 case ISD::FP_TO_UINT:
3510 // Custom lower only if it involves f128 or i64.
3511 if (N->getOperand(0).getValueType() != MVT::f128
3512 || N->getValueType(0) != MVT::i64)
3513 return;
3514 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3515 ? RTLIB::FPTOSINT_F128_I64
3516 : RTLIB::FPTOUINT_F128_I64);
3517
3518 Results.push_back(LowerF128Op(SDValue(N, 0),
3519 DAG,
3520 getLibcallName(libCall),
3521 1));
3522 return;
3523 case ISD::READCYCLECOUNTER: {
3524 assert(Subtarget->hasLeonCycleCounter());
3525 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3526 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3527 SDValue Ops[] = { Lo, Hi };
3528 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3529 Results.push_back(Pair);
3530 Results.push_back(N->getOperand(0));
3531 return;
3532 }
3533 case ISD::SINT_TO_FP:
3534 case ISD::UINT_TO_FP:
3535 // Custom lower only if it involves f128 or i64.
3536 if (N->getValueType(0) != MVT::f128
3537 || N->getOperand(0).getValueType() != MVT::i64)
3538 return;
3539
3540 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3541 ? RTLIB::SINTTOFP_I64_F128
3542 : RTLIB::UINTTOFP_I64_F128);
3543
3544 Results.push_back(LowerF128Op(SDValue(N, 0),
3545 DAG,
3546 getLibcallName(libCall),
3547 1));
3548 return;
3549 case ISD::LOAD: {
3551 // Custom handling only for i64: turn i64 load into a v2i32 load,
3552 // and a bitcast.
3553 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3554 return;
3555
3556 SDLoc dl(N);
3557 SDValue LoadRes = DAG.getExtLoad(
3558 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3559 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3560 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3561
3562 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3563 Results.push_back(Res);
3564 Results.push_back(LoadRes.getValue(1));
3565 return;
3566 }
3567 }
3568}
3569
3570// Override to enable LOAD_STACK_GUARD lowering on Linux.
3572 if (!Subtarget->getTargetTriple().isOSLinux())
3574 return true;
3575}
3576
// NOTE(review): the opening signature line of this member function is not
// visible in this chunk of the file; only the body survives here. From the
// body alone: the predicate answers true solely for f32/f64, and only when
// the VIS3 extension is available — confirm the hook name against the full
// source before relying on this.
  if (Subtarget->isVIS3())
    return VT == MVT::f32 || VT == MVT::f64;
  return false;
}
3582
3584 bool ForCodeSize) const {
3585 if (VT != MVT::f32 && VT != MVT::f64)
3586 return false;
3587 if (Subtarget->isVIS() && Imm.isZero())
3588 return true;
3589 if (Subtarget->isVIS3())
3590 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3591 Imm.getExactLog2Abs() == -1;
3592 return false;
3593}
3594
// ctlz is reported as fast only when the VIS3 extension is available.
bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3596
3598 // We lack native cttz, however,
3599 // On 64-bit targets it is cheap to implement it in terms of popc.
3600 if (Subtarget->is64Bit() && Subtarget->usePopc())
3601 return true;
3602 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3603 return isCheapToSpeculateCtlz(Ty);
3604}
3605
// NOTE(review): the first line of this member function's signature is not
// visible in this chunk; only the trailing parameter and body survive. From
// the body alone: the hook answers true only on UA2007-capable subtargets
// that are not using soft-float — confirm the hook name against the full
// source before relying on this.
                                                 EVT VT) const {
  return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
}
3610
3612 SDNode *Node) const {
3613 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3614 // If the result is dead, replace it with %g0.
3615 if (!Node->hasAnyUseOfValue(0))
3616 MI.getOperand(0).setReg(SP::G0);
3617}
3618
3620 Instruction *Inst,
3621 AtomicOrdering Ord) const {
3622 bool HasStoreSemantics =
3624 if (HasStoreSemantics && isReleaseOrStronger(Ord))
3625 return Builder.CreateFence(AtomicOrdering::Release);
3626 return nullptr;
3627}
3628
3630 Instruction *Inst,
3631 AtomicOrdering Ord) const {
3632 // V8 loads already come with implicit acquire barrier so there's no need to
3633 // emit it again.
3634 bool HasLoadSemantics = isa<AtomicCmpXchgInst, AtomicRMWInst, LoadInst>(Inst);
3635 if (Subtarget->isV9() && HasLoadSemantics && isAcquireOrStronger(Ord))
3636 return Builder.CreateFence(AtomicOrdering::Acquire);
3637
3638 // SC plain stores would need a trailing full barrier.
3640 return Builder.CreateFence(Ord);
3641 return nullptr;
3642}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL, SDValue &RHS, SelectionDAG &DAG)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
static constexpr MCPhysReg SPReg
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
static unsigned toCallerWindow(unsigned Reg)
static SDValue LowerSTACKADDRESS(SDValue Op, SelectionDAG &DAG, const SparcSubtarget &Subtarget)
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC)
intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC rcond condition.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC)
FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC FCC condition.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI, const MachineFunction &MF)
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG)
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, const CallBase *Call)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG, unsigned opcode)
static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static void emitReservedArgRegCallError(const MachineFunction &MF)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
This file describes how to lower LLVM code to machine code.
static bool is64Bit(const char *name)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
LLVM Basic Block Representation.
Definition BasicBlock.h:62
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
bool isLittleEndian() const
Layout endianness...
Definition DataLayout.h:214
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
const Function & getFunction() const
Definition Function.h:164
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition Function.h:687
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getBaseAlign() const
Returns alignment and volatility of the memory access.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool useSoftFloat() const override
SDValue bitcastConstantFPToInt(ConstantFPSDNode *C, const SDLoc &DL, SelectionDAG &DAG) const
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool IsEligibleForTailCallOptimization(CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
computeKnownBitsForTargetNode - Determine which of the bits specified in Mask are known to be either ...
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue PerformBITCASTCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool useLoadStackGuardNode(const Module &M) const override
Override to support customized stack guard loading.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerF128Compare(SDValue LHS, SDValue RHS, unsigned &SPCC, const SDLoc &DL, SelectionDAG &DAG) const
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
char back() const
back - Get the last character in the string.
Definition StringRef.h:155
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
char front() const
front - Get the first character in the string.
Definition StringRef.h:149
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
bool isFP128Ty() const
Return true if this is 'fp128'.
Definition Type.h:162
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
self_iterator getIterator()
Definition ilist_node.h:123
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:818
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:787
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ STACKADDRESS
STACKADDRESS - Represents the llvm.stackaddress intrinsic.
Definition ISDOpcodes.h:127
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:778
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:852
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ GlobalAddress
Definition ISDOpcodes.h:88
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:879
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:992
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ GlobalTLSAddress
Definition ISDOpcodes.h:89
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:843
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:786
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:795
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:233
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:703
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:764
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:849
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:810
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:887
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:977
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:925
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:738
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:958
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:855
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:832
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
@ FCC_ULE
Definition Sparc.h:74
@ FCC_UG
Definition Sparc.h:64
@ ICC_G
Definition Sparc.h:46
@ REG_LEZ
Definition Sparc.h:97
@ ICC_L
Definition Sparc.h:49
@ FCC_NE
Definition Sparc.h:68
@ ICC_CS
Definition Sparc.h:53
@ FCC_LG
Definition Sparc.h:67
@ ICC_LEU
Definition Sparc.h:51
@ FCC_LE
Definition Sparc.h:73
@ ICC_LE
Definition Sparc.h:47
@ FCC_U
Definition Sparc.h:62
@ ICC_GE
Definition Sparc.h:48
@ FCC_E
Definition Sparc.h:69
@ REG_LZ
Definition Sparc.h:98
@ FCC_L
Definition Sparc.h:65
@ ICC_GU
Definition Sparc.h:50
@ FCC_O
Definition Sparc.h:75
@ ICC_NE
Definition Sparc.h:44
@ FCC_UE
Definition Sparc.h:70
@ REG_NZ
Definition Sparc.h:99
@ ICC_E
Definition Sparc.h:45
@ FCC_GE
Definition Sparc.h:71
@ FCC_UGE
Definition Sparc.h:72
@ REG_Z
Definition Sparc.h:96
@ ICC_CC
Definition Sparc.h:52
@ REG_GEZ
Definition Sparc.h:101
@ FCC_G
Definition Sparc.h:63
@ FCC_UL
Definition Sparc.h:66
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isStrongerThanMonotonic(AtomicOrdering AO)
std::string utostr(uint64_t X, bool isNeg=false)
bool isReleaseOrStronger(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
bool isAcquireOrStronger(AtomicOrdering AO)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition ValueTypes.h:94
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:395
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:65
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition KnownBits.h:314
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})