LLVM 23.0.0git
SparcISelLowering.cpp
Go to the documentation of this file.
1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Sparc uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SparcISelLowering.h"
17#include "SparcRegisterInfo.h"
19#include "SparcTargetMachine.h"
35#include "llvm/IR/Function.h"
36#include "llvm/IR/IRBuilder.h"
37#include "llvm/IR/Module.h"
40using namespace llvm;
41
42
43//===----------------------------------------------------------------------===//
44// Calling Convention Implementation
45//===----------------------------------------------------------------------===//
46
47static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
48 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
49 ISD::ArgFlagsTy &ArgFlags, CCState &State)
50{
51 assert (ArgFlags.isSRet());
52
53 // Assign SRet argument.
54 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
55 0,
56 LocVT, LocInfo));
57 return true;
58}
59
60static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
61 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
62 ISD::ArgFlagsTy &ArgFlags, CCState &State)
63{
64 static const MCPhysReg RegList[] = {
65 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
66 };
67 // Try to get first reg.
68 if (Register Reg = State.AllocateReg(RegList)) {
69 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
70 } else {
71 // Assign whole thing in stack.
72 State.addLoc(CCValAssign::getCustomMem(
73 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
74 return true;
75 }
76
77 // Try to get second reg.
78 if (Register Reg = State.AllocateReg(RegList))
79 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
80 else
81 State.addLoc(CCValAssign::getCustomMem(
82 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
83 return true;
84}
85
86static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
87 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
88 ISD::ArgFlagsTy &ArgFlags, CCState &State)
89{
90 static const MCPhysReg RegList[] = {
91 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
92 };
93
94 // Try to get first reg.
95 if (Register Reg = State.AllocateReg(RegList))
96 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
97 else
98 return false;
99
100 // Try to get second reg.
101 if (Register Reg = State.AllocateReg(RegList))
102 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
103 else
104 return false;
105
106 return true;
107}
108
109// Allocate a full-sized argument for the 64-bit ABI.
110static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
111 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
112 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
113 assert((LocVT == MVT::f32 || LocVT == MVT::f128
114 || LocVT.getSizeInBits() == 64) &&
115 "Can't handle non-64 bits locations");
116
117 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
118 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
119 Align alignment =
120 (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8);
121 unsigned Offset = State.AllocateStack(size, alignment);
122 unsigned Reg = 0;
123
124 if (LocVT == MVT::i64 && Offset < 6*8)
125 // Promote integers to %i0-%i5.
126 Reg = SP::I0 + Offset/8;
127 else if (LocVT == MVT::f64 && Offset < 16*8)
128 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
129 Reg = SP::D0 + Offset/8;
130 else if (LocVT == MVT::f32 && Offset < 16*8)
131 // Promote floats to %f1, %f3, ...
132 Reg = SP::F1 + Offset/4;
133 else if (LocVT == MVT::f128 && Offset < 16*8)
134 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
135 Reg = SP::Q0 + Offset/16;
136
137 // Promote to register when possible, otherwise use the stack slot.
138 if (Reg) {
139 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
140 return true;
141 }
142
143 // Bail out if this is a return CC and we run out of registers to place
144 // values into.
145 if (IsReturn)
146 return false;
147
148 // This argument goes on the stack in an 8-byte slot.
149 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
150 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
151 if (LocVT == MVT::f32)
152 Offset += 4;
153
154 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
155 return true;
156}
157
158// Allocate a half-sized argument for the 64-bit ABI.
159//
160// This is used when passing { float, int } structs by value in registers.
161static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
162 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
163 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
164 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
165 unsigned Offset = State.AllocateStack(4, Align(4));
166
167 if (LocVT == MVT::f32 && Offset < 16*8) {
168 // Promote floats to %f0-%f31.
169 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
170 LocVT, LocInfo));
171 return true;
172 }
173
174 if (LocVT == MVT::i32 && Offset < 6*8) {
175 // Promote integers to %i0-%i5, using half the register.
176 unsigned Reg = SP::I0 + Offset/8;
177 LocVT = MVT::i64;
178 LocInfo = CCValAssign::AExt;
179
180 // Set the Custom bit if this i32 goes in the high bits of a register.
181 if (Offset % 8 == 0)
182 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
183 LocVT, LocInfo));
184 else
185 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
186 return true;
187 }
188
189 // Bail out if this is a return CC and we run out of registers to place
190 // values into.
191 if (IsReturn)
192 return false;
193
194 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
195 return true;
196}
197
198static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
199 CCValAssign::LocInfo &LocInfo,
200 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
201 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
202 State);
203}
204
205static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
206 CCValAssign::LocInfo &LocInfo,
207 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
208 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
209 State);
210}
211
212static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
213 CCValAssign::LocInfo &LocInfo,
214 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
215 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
216 State);
217}
218
219static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
220 CCValAssign::LocInfo &LocInfo,
221 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
222 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
223 State);
224}
225
226#include "SparcGenCallingConv.inc"
227
228// The calling conventions in SparcCallingConv.td are described in terms of the
229// callee's register window. This function translates registers to the
230// corresponding caller window %o register.
231static unsigned toCallerWindow(unsigned Reg) {
232 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
233 "Unexpected enum");
234 if (Reg >= SP::I0 && Reg <= SP::I7)
235 return Reg - SP::I0 + SP::O0;
236 return Reg;
237}
238
240 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
241 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
242 const Type *RetTy) const {
244 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
245 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
246 : RetCC_Sparc32);
247}
248
251 bool IsVarArg,
253 const SmallVectorImpl<SDValue> &OutVals,
254 const SDLoc &DL, SelectionDAG &DAG) const {
255 if (Subtarget->is64Bit())
256 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
257 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
258}
259
262 bool IsVarArg,
264 const SmallVectorImpl<SDValue> &OutVals,
265 const SDLoc &DL, SelectionDAG &DAG) const {
267
268 // CCValAssign - represent the assignment of the return value to locations.
270
271 // CCState - Info about the registers and stack slot.
272 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
273 *DAG.getContext());
274
275 // Analyze return values.
276 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
277
278 SDValue Glue;
279 SmallVector<SDValue, 4> RetOps(1, Chain);
280 // Make room for the return address offset.
281 RetOps.push_back(SDValue());
282
283 // Copy the result values into the output registers.
284 for (unsigned i = 0, realRVLocIdx = 0;
285 i != RVLocs.size();
286 ++i, ++realRVLocIdx) {
287 CCValAssign &VA = RVLocs[i];
288 assert(VA.isRegLoc() && "Can only return in registers!");
289
290 SDValue Arg = OutVals[realRVLocIdx];
291
292 if (VA.needsCustom()) {
293 assert(VA.getLocVT() == MVT::v2i32);
294 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
295 // happen by default if this wasn't a legal type)
296
297 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
298 Arg,
300 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
301 Arg,
303
304 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
305 Glue = Chain.getValue(1);
306 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
307 VA = RVLocs[++i]; // skip ahead to next loc
308 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
309 Glue);
310 } else
311 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
312
313 // Guarantee that all emitted copies are stuck together with flags.
314 Glue = Chain.getValue(1);
315 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
316 }
317
318 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
319 // If the function returns a struct, copy the SRetReturnReg to I0
320 if (MF.getFunction().hasStructRetAttr()) {
322 Register Reg = SFI->getSRetReturnReg();
323 if (!Reg)
324 llvm_unreachable("sret virtual register not created in the entry block");
325 auto PtrVT = getPointerTy(DAG.getDataLayout());
326 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
327 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
328 Glue = Chain.getValue(1);
329 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
330 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
331 }
332
333 RetOps[0] = Chain; // Update chain.
334 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
335
336 // Add the glue if we have it.
337 if (Glue.getNode())
338 RetOps.push_back(Glue);
339
340 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
341}
342
343// Lower return values for the 64-bit ABI.
344// Return values are passed the exactly the same way as function arguments.
347 bool IsVarArg,
349 const SmallVectorImpl<SDValue> &OutVals,
350 const SDLoc &DL, SelectionDAG &DAG) const {
351 // CCValAssign - represent the assignment of the return value to locations.
353
354 // CCState - Info about the registers and stack slot.
355 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
356 *DAG.getContext());
357
358 // Analyze return values.
359 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
360
361 SDValue Glue;
362 SmallVector<SDValue, 4> RetOps(1, Chain);
363
364 // The second operand on the return instruction is the return address offset.
365 // The return address is always %i7+8 with the 64-bit ABI.
366 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
367
368 // Copy the result values into the output registers.
369 for (unsigned i = 0; i != RVLocs.size(); ++i) {
370 CCValAssign &VA = RVLocs[i];
371 assert(VA.isRegLoc() && "Can only return in registers!");
372 SDValue OutVal = OutVals[i];
373
374 // Integer return values must be sign or zero extended by the callee.
375 switch (VA.getLocInfo()) {
376 case CCValAssign::Full: break;
378 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
379 break;
381 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
382 break;
384 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
385 break;
386 default:
387 llvm_unreachable("Unknown loc info!");
388 }
389
390 // The custom bit on an i32 return value indicates that it should be passed
391 // in the high bits of the register.
392 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
393 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
394 DAG.getConstant(32, DL, MVT::i32));
395
396 // The next value may go in the low bits of the same register.
397 // Handle both at once.
398 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
399 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
400 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
401 // Skip the next value, it's already done.
402 ++i;
403 }
404 }
405
406 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
407
408 // Guarantee that all emitted copies are stuck together with flags.
409 Glue = Chain.getValue(1);
410 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
411 }
412
413 RetOps[0] = Chain; // Update chain.
414
415 // Add the flag if we have it.
416 if (Glue.getNode())
417 RetOps.push_back(Glue);
418
419 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
420}
421
423 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
424 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
425 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
426 if (Subtarget->is64Bit())
427 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
428 DL, DAG, InVals);
429 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
430 DL, DAG, InVals);
431}
432
433/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
434/// passed in either one or two GPRs, including FP values. TODO: we should
435/// pass FP values in FP registers for fastcc functions.
437 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
438 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
439 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
441 MachineRegisterInfo &RegInfo = MF.getRegInfo();
443 EVT PtrVT = getPointerTy(DAG.getDataLayout());
444
445 // Assign locations to all of the incoming arguments.
447 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
448 *DAG.getContext());
449 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
450
451 const unsigned StackOffset = 92;
452 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
453
454 unsigned InIdx = 0;
455 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
456 CCValAssign &VA = ArgLocs[i];
457 EVT LocVT = VA.getLocVT();
458
459 if (Ins[InIdx].Flags.isSRet()) {
460 if (InIdx != 0)
461 report_fatal_error("sparc only supports sret on the first parameter");
462 // Get SRet from [%fp+64].
463 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
464 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
465 SDValue Arg =
466 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
467 InVals.push_back(Arg);
468 continue;
469 }
470
471 SDValue Arg;
472 if (VA.isRegLoc()) {
473 if (VA.needsCustom()) {
474 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
475
476 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
477 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
478 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
479
480 assert(i+1 < e);
481 CCValAssign &NextVA = ArgLocs[++i];
482
483 SDValue LoVal;
484 if (NextVA.isMemLoc()) {
485 int FrameIdx = MF.getFrameInfo().
486 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
487 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
488 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
489 } else {
490 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
491 &SP::IntRegsRegClass);
492 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
493 }
494
495 if (IsLittleEndian)
496 std::swap(LoVal, HiVal);
497
498 SDValue WholeValue =
499 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
500 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
501 InVals.push_back(WholeValue);
502 continue;
503 }
504 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
505 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
506 Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
507 if (VA.getLocInfo() != CCValAssign::Indirect) {
508 if (VA.getLocVT() == MVT::f32)
509 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
510 else if (VA.getLocVT() != MVT::i32) {
511 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
512 DAG.getValueType(VA.getLocVT()));
513 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
514 }
515 InVals.push_back(Arg);
516 continue;
517 }
518 } else {
519 assert(VA.isMemLoc());
520
521 unsigned Offset = VA.getLocMemOffset() + StackOffset;
522
523 if (VA.needsCustom()) {
524 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
525 // If it is double-word aligned, just load.
526 if (Offset % 8 == 0) {
527 int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true);
528 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
529 SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
531 InVals.push_back(Load);
532 continue;
533 }
534
535 int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
536 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
537 SDValue HiVal =
538 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
539 int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset + 4, true);
540 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
541
542 SDValue LoVal =
543 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
544
545 if (IsLittleEndian)
546 std::swap(LoVal, HiVal);
547
548 SDValue WholeValue =
549 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
550 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
551 InVals.push_back(WholeValue);
552 continue;
553 }
554
555 int FI = MF.getFrameInfo().CreateFixedObject(LocVT.getSizeInBits() / 8,
556 Offset, true);
557 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
558 SDValue Load = DAG.getLoad(LocVT, dl, Chain, FIPtr,
560 if (VA.getLocInfo() != CCValAssign::Indirect) {
561 InVals.push_back(Load);
562 continue;
563 }
564 Arg = Load;
565 }
566
568
569 SDValue ArgValue =
570 DAG.getLoad(VA.getValVT(), dl, Chain, Arg, MachinePointerInfo());
571 InVals.push_back(ArgValue);
572
573 unsigned ArgIndex = Ins[InIdx].OrigArgIndex;
574 assert(Ins[InIdx].PartOffset == 0);
575 while (i + 1 != e && Ins[InIdx + 1].OrigArgIndex == ArgIndex) {
576 CCValAssign &PartVA = ArgLocs[i + 1];
577 unsigned PartOffset = Ins[InIdx + 1].PartOffset;
579 ArgValue, TypeSize::getFixed(PartOffset), dl);
580 InVals.push_back(DAG.getLoad(PartVA.getValVT(), dl, Chain, Address,
582 ++i;
583 ++InIdx;
584 }
585 }
586
587 if (MF.getFunction().hasStructRetAttr()) {
588 // Copy the SRet Argument to SRetReturnReg.
590 Register Reg = SFI->getSRetReturnReg();
591 if (!Reg) {
592 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
593 SFI->setSRetReturnReg(Reg);
594 }
595 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
596 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
597 }
598
599 // Store remaining ArgRegs to the stack if this is a varargs function.
600 if (isVarArg) {
601 static const MCPhysReg ArgRegs[] = {
602 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
603 };
604 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
605 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
606 unsigned ArgOffset = CCInfo.getStackSize();
607 if (NumAllocated == 6)
608 ArgOffset += StackOffset;
609 else {
610 assert(!ArgOffset);
611 ArgOffset = 68+4*NumAllocated;
612 }
613
614 // Remember the vararg offset for the va_start implementation.
615 FuncInfo->setVarArgsFrameOffset(ArgOffset);
616
617 std::vector<SDValue> OutChains;
618
619 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
620 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
621 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
622 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
623
624 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
625 true);
626 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
627
628 OutChains.push_back(
629 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
630 ArgOffset += 4;
631 }
632
633 if (!OutChains.empty()) {
634 OutChains.push_back(Chain);
635 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
636 }
637 }
638
639 return Chain;
640}
641
642// Lower formal arguments for the 64 bit ABI.
644 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
645 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
646 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
648
649 // Analyze arguments according to CC_Sparc64.
651 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
652 *DAG.getContext());
653 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
654
655 // The argument array begins at %fp+BIAS+128, after the register save area.
656 const unsigned ArgArea = 128;
657
658 for (const CCValAssign &VA : ArgLocs) {
659 if (VA.isRegLoc()) {
660 // This argument is passed in a register.
661 // All integer register arguments are promoted by the caller to i64.
662
663 // Create a virtual register for the promoted live-in value.
664 Register VReg = MF.addLiveIn(VA.getLocReg(),
665 getRegClassFor(VA.getLocVT()));
666 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
667
668 // Get the high bits for i32 struct elements.
669 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
670 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
671 DAG.getConstant(32, DL, MVT::i32));
672
673 // The caller promoted the argument, so insert an Assert?ext SDNode so we
674 // won't promote the value again in this function.
675 switch (VA.getLocInfo()) {
677 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
678 DAG.getValueType(VA.getValVT()));
679 break;
681 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
682 DAG.getValueType(VA.getValVT()));
683 break;
684 default:
685 break;
686 }
687
688 // Truncate the register down to the argument type.
689 if (VA.isExtInLoc())
690 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
691
692 InVals.push_back(Arg);
693 continue;
694 }
695
696 // The registers are exhausted. This argument was passed on the stack.
697 assert(VA.isMemLoc());
698 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
699 // beginning of the arguments area at %fp+BIAS+128.
700 unsigned Offset = VA.getLocMemOffset() + ArgArea;
701 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
702 // Adjust offset for extended arguments, SPARC is big-endian.
703 // The caller will have written the full slot with extended bytes, but we
704 // prefer our own extending loads.
705 if (VA.isExtInLoc())
706 Offset += 8 - ValSize;
707 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
708 InVals.push_back(
709 DAG.getLoad(VA.getValVT(), DL, Chain,
712 }
713
714 if (!IsVarArg)
715 return Chain;
716
717 // This function takes variable arguments, some of which may have been passed
718 // in registers %i0-%i5. Variable floating point arguments are never passed
719 // in floating point registers. They go on %i0-%i5 or on the stack like
720 // integer arguments.
721 //
722 // The va_start intrinsic needs to know the offset to the first variable
723 // argument.
724 unsigned ArgOffset = CCInfo.getStackSize();
726 // Skip the 128 bytes of register save area.
727 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
728 Subtarget->getStackPointerBias());
729
730 // Save the variable arguments that were passed in registers.
731 // The caller is required to reserve stack space for 6 arguments regardless
732 // of how many arguments were actually passed.
733 SmallVector<SDValue, 8> OutChains;
734 for (; ArgOffset < 6*8; ArgOffset += 8) {
735 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
736 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
737 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
738 auto PtrVT = getPointerTy(MF.getDataLayout());
739 OutChains.push_back(
740 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
742 }
743
744 if (!OutChains.empty())
745 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
746
747 return Chain;
748}
749
750// Check whether any of the argument registers are reserved
752 const MachineFunction &MF) {
753 // The register window design means that outgoing parameters at O*
754 // will appear in the callee as I*.
755 // Be conservative and check both sides of the register names.
756 bool Outgoing =
757 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
758 return TRI->isReservedReg(MF, r);
759 });
760 bool Incoming =
761 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
762 return TRI->isReservedReg(MF, r);
763 });
764 return Outgoing || Incoming;
765}
766
768 const Function &F = MF.getFunction();
769 F.getContext().diagnose(DiagnosticInfoUnsupported{
770 F, ("SPARC doesn't support"
771 " function calls if any of the argument registers is reserved.")});
772}
773
776 SmallVectorImpl<SDValue> &InVals) const {
777 if (Subtarget->is64Bit())
778 return LowerCall_64(CLI, InVals);
779 return LowerCall_32(CLI, InVals);
780}
781
782static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
783 const CallBase *Call) {
784 if (Call)
785 return Call->hasFnAttr(Attribute::ReturnsTwice);
786
787 const Function *CalleeFn = nullptr;
789 CalleeFn = dyn_cast<Function>(G->getGlobal());
790 } else if (ExternalSymbolSDNode *E =
792 const Function &Fn = DAG.getMachineFunction().getFunction();
793 const Module *M = Fn.getParent();
794 const char *CalleeName = E->getSymbol();
795 CalleeFn = M->getFunction(CalleeName);
796 }
797
798 if (!CalleeFn)
799 return false;
800 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
801}
802
803/// IsEligibleForTailCallOptimization - Check whether the call is eligible
804/// for tail call optimization.
806 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
807
808 auto &Outs = CLI.Outs;
809 auto &Caller = MF.getFunction();
810
811 // Do not tail call opt functions with "disable-tail-calls" attribute.
812 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
813 return false;
814
815 // Do not tail call opt if the stack is used to pass parameters.
816 // 64-bit targets have a slightly higher limit since the ABI requires
817 // to allocate some space even when all the parameters fit inside registers.
818 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
819 if (CCInfo.getStackSize() > StackSizeLimit)
820 return false;
821
822 // Do not tail call opt if either the callee or caller returns
823 // a struct and the other does not.
824 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
825 return false;
826
827 // Byval parameters hand the function a pointer directly into the stack area
828 // we want to reuse during a tail call.
829 for (auto &Arg : Outs)
830 if (Arg.Flags.isByVal())
831 return false;
832
833 return true;
834}
835
836// Lower a call for the 32-bit ABI.
839 SmallVectorImpl<SDValue> &InVals) const {
840 SelectionDAG &DAG = CLI.DAG;
841 SDLoc &dl = CLI.DL;
843 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
845 SDValue Chain = CLI.Chain;
846 SDValue Callee = CLI.Callee;
847 bool &isTailCall = CLI.IsTailCall;
848 CallingConv::ID CallConv = CLI.CallConv;
849 bool isVarArg = CLI.IsVarArg;
851 LLVMContext &Ctx = *DAG.getContext();
852 EVT PtrVT = getPointerTy(MF.getDataLayout());
853
854 // Analyze operands of the call, assigning locations to each operand.
856 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
857 *DAG.getContext());
858 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
859
860 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
861 CCInfo, CLI, DAG.getMachineFunction());
862
863 // Get the size of the outgoing arguments stack space requirement.
864 unsigned ArgsSize = CCInfo.getStackSize();
865
866 // Keep stack frames 8-byte aligned.
867 ArgsSize = (ArgsSize+7) & ~7;
868
870
871 // Create local copies for byval args.
872 SmallVector<SDValue, 8> ByValArgs;
873 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
874 ISD::ArgFlagsTy Flags = Outs[i].Flags;
875 if (!Flags.isByVal())
876 continue;
877
878 SDValue Arg = OutVals[i];
879 unsigned Size = Flags.getByValSize();
880 Align Alignment = Flags.getNonZeroByValAlign();
881
882 if (Size > 0U) {
883 int FI = MFI.CreateStackObject(Size, Alignment, false);
884 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
885 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
886
887 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
888 false, // isVolatile,
889 (Size <= 32), // AlwaysInline if size <= 32,
890 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
892 ByValArgs.push_back(FIPtr);
893 }
894 else {
895 SDValue nullVal;
896 ByValArgs.push_back(nullVal);
897 }
898 }
899
900 assert(!isTailCall || ArgsSize == 0);
901
902 if (!isTailCall)
903 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
904
906 SmallVector<SDValue, 8> MemOpChains;
907
908 const unsigned StackOffset = 92;
909 bool hasStructRetAttr = false;
910 unsigned SRetArgSize = 0;
911 // Walk the register/memloc assignments, inserting copies/loads.
912 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
913 i != e;
914 ++i, ++realArgIdx) {
915 CCValAssign &VA = ArgLocs[i];
916 SDValue Arg = OutVals[realArgIdx];
917
918 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
919
920 // Use local copy if it is a byval arg.
921 if (Flags.isByVal()) {
922 Arg = ByValArgs[byvalArgIdx++];
923 if (!Arg) {
924 continue;
925 }
926 }
927
928 // Promote the value if needed.
929 switch (VA.getLocInfo()) {
930 default: llvm_unreachable("Unknown loc info!");
933 break;
935 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
936 break;
938 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
939 break;
941 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
942 break;
944 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
945 break;
946 }
947
948 if (Flags.isSRet()) {
949 assert(VA.needsCustom());
950
951 if (isTailCall)
952 continue;
953
954 // store SRet argument in %sp+64
955 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
956 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
957 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
958 MemOpChains.push_back(
959 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
960 hasStructRetAttr = true;
961 // sret only allowed on first argument
962 assert(Outs[realArgIdx].OrigArgIndex == 0);
963 SRetArgSize =
964 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
965 continue;
966 }
967
968 if (VA.needsCustom()) {
969 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
970
971 if (VA.isMemLoc()) {
972 unsigned Offset = VA.getLocMemOffset() + StackOffset;
973 // if it is double-word aligned, just store.
974 if (Offset % 8 == 0) {
975 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
976 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
977 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
978 MemOpChains.push_back(
979 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
980 continue;
981 }
982 }
983
984 if (VA.getLocVT() == MVT::f64) {
985 // Move from the float value from float registers into the
986 // integer registers.
988 Arg = bitcastConstantFPToInt(C, dl, DAG);
989 else
990 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
991 }
992
993 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
994 Arg,
995 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
996 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
997 Arg,
998 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
999
1000 if (VA.isRegLoc()) {
1001 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
1002 assert(i+1 != e);
1003 CCValAssign &NextVA = ArgLocs[++i];
1004 if (NextVA.isRegLoc()) {
1005 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
1006 } else {
1007 // Store the second part in stack.
1008 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
1009 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1010 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1011 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1012 MemOpChains.push_back(
1013 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1014 }
1015 } else {
1016 unsigned Offset = VA.getLocMemOffset() + StackOffset;
1017 // Store the first part.
1018 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1019 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1020 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1021 MemOpChains.push_back(
1022 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1023 // Store the second part.
1024 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1025 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1026 MemOpChains.push_back(
1027 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1028 }
1029 continue;
1030 }
1031
1032 if (VA.getLocInfo() == CCValAssign::Indirect) {
1033 // Store the argument in a stack slot and pass its address.
1034 unsigned ArgIndex = Outs[realArgIdx].OrigArgIndex;
1035 assert(Outs[realArgIdx].PartOffset == 0);
1036
1037 EVT SlotVT;
1038 if (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) {
1039 Type *OrigArgType = CLI.Args[ArgIndex].Ty;
1040 EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType);
1041 MVT PartVT =
1042 getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
1043 unsigned N =
1044 getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
1045 SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N);
1046 } else {
1047 SlotVT = Outs[realArgIdx].VT;
1048 }
1049
1050 SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT);
1051 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1052 MemOpChains.push_back(
1053 DAG.getStore(Chain, dl, Arg, SpillSlot,
1055 // If the original argument was split (e.g. f128), we need
1056 // to store all parts of it here (and pass just one address).
1057 while (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) {
1058 SDValue PartValue = OutVals[realArgIdx + 1];
1059 unsigned PartOffset = Outs[realArgIdx + 1].PartOffset;
1061 DAG.getFrameIndex(FI, PtrVT), TypeSize::getFixed(PartOffset), dl);
1062 MemOpChains.push_back(
1063 DAG.getStore(Chain, dl, PartValue, Address,
1065 assert((PartOffset + PartValue.getValueType().getStoreSize() <=
1066 SlotVT.getStoreSize()) &&
1067 "Not enough space for argument part!");
1068 ++i;
1069 ++realArgIdx;
1070 }
1071
1072 Arg = SpillSlot;
1073 }
1074
1075 // Arguments that can be passed on register must be kept at
1076 // RegsToPass vector
1077 if (VA.isRegLoc()) {
1078 if (VA.getLocVT() != MVT::f32) {
1079 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1080 continue;
1081 }
1082 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1083 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1084 continue;
1085 }
1086
1087 assert(VA.isMemLoc());
1088
1089 // Create a store off the stack pointer for this argument.
1090 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1092 dl);
1093 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1094 MemOpChains.push_back(
1095 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1096 }
1097
1098
1099 // Emit all stores, make sure the occur before any copies into physregs.
1100 if (!MemOpChains.empty())
1101 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1102
1103 // Build a sequence of copy-to-reg nodes chained together with token
1104 // chain and flag operands which copy the outgoing args into registers.
1105 // The InGlue in necessary since all emitted instructions must be
1106 // stuck together.
1107 SDValue InGlue;
1108 for (const auto &[OrigReg, N] : RegsToPass) {
1109 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1110 Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
1111 InGlue = Chain.getValue(1);
1112 }
1113
1114 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1115
1116 // If the callee is a GlobalAddress node (quite common, every direct call is)
1117 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1118 // Likewise ExternalSymbol -> TargetExternalSymbol.
1120 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
1122 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1123
1124 // Returns a chain & a flag for retval copy to use
1125 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1127 Ops.push_back(Chain);
1128 Ops.push_back(Callee);
1129 if (hasStructRetAttr)
1130 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1131 for (const auto &[OrigReg, N] : RegsToPass) {
1132 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1133 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1134 }
1135
1136 // Add a register mask operand representing the call-preserved registers.
1137 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1138 const uint32_t *Mask =
1139 ((hasReturnsTwice)
1140 ? TRI->getRTCallPreservedMask(CallConv)
1141 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1142
1143 if (isAnyArgRegReserved(TRI, MF))
1145
1146 assert(Mask && "Missing call preserved mask for calling convention");
1147 Ops.push_back(DAG.getRegisterMask(Mask));
1148
1149 if (InGlue.getNode())
1150 Ops.push_back(InGlue);
1151
1152 if (isTailCall) {
1154 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1155 }
1156
1157 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1158 InGlue = Chain.getValue(1);
1159
1160 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1161 InGlue = Chain.getValue(1);
1162
1163 // Assign locations to each value returned by this call.
1165 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1166 *DAG.getContext());
1167
1168 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1169
1170 // Copy all of the result registers out of their specified physreg.
1171 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1172 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1173 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1174 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1176 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1177 Chain = Lo.getValue(1);
1178 InGlue = Lo.getValue(2);
1179 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1180 DAG.getConstant(0, dl, MVT::i32));
1182 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1183 Chain = Hi.getValue(1);
1184 InGlue = Hi.getValue(2);
1185 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1186 DAG.getConstant(1, dl, MVT::i32));
1187 InVals.push_back(Vec);
1188 } else {
1189 Chain =
1190 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1191 RVLocs[i].getValVT(), InGlue)
1192 .getValue(1);
1193 InGlue = Chain.getValue(2);
1194 InVals.push_back(Chain.getValue(0));
1195 }
1196 }
1197
1198 return Chain;
1199}
1200
1201// FIXME? Maybe this could be a TableGen attribute on some registers and
1202// this table could be generated automatically from RegInfo.
1204 const MachineFunction &MF) const {
1206 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1207 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1208 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1209 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1210 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1211 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1212 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1213 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1214 .Default(0);
1215
1216 // If we're directly referencing register names
1217 // (e.g in GCC C extension `register int r asm("g1");`),
1218 // make sure that said register is in the reserve list.
1219 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1220 if (!TRI->isReservedReg(MF, Reg))
1221 Reg = Register();
1222
1223 return Reg;
1224}
1225
1226// Fixup floating point arguments in the ... part of a varargs call.
1227//
1228// The SPARC v9 ABI requires that floating point arguments are treated the same
1229// as integers when calling a varargs function. This does not apply to the
1230// fixed arguments that are part of the function's prototype.
1231//
1232// This function post-processes a CCValAssign array created by
1233// AnalyzeCallOperands().
1236 for (CCValAssign &VA : ArgLocs) {
1237 MVT ValTy = VA.getLocVT();
1238 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1239 // varargs functions.
1240 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1241 continue;
1242 // The fixed arguments to a varargs function still go in FP registers.
1243 if (!Outs[VA.getValNo()].Flags.isVarArg())
1244 continue;
1245
1246 // This floating point argument should be reassigned.
1247 // Determine the offset into the argument array.
1248 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1249 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1250 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1251 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1252
1253 if (Offset < 6*8) {
1254 // This argument should go in %i0-%i5.
1255 unsigned IReg = SP::I0 + Offset/8;
1256 if (ValTy == MVT::f64)
1257 // Full register, just bitconvert into i64.
1258 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1260 else {
1261 assert(ValTy == MVT::f128 && "Unexpected type!");
1262 // Full register, just bitconvert into i128 -- We will lower this into
1263 // two i64s in LowerCall_64.
1264 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1265 MVT::i128, CCValAssign::BCvt);
1266 }
1267 } else {
1268 // This needs to go to memory, we're out of integer registers.
1269 VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
1270 VA.getLocVT(), VA.getLocInfo());
1271 }
1272 }
1273}
1274
1275// Lower a call for the 64-bit ABI.
1276SDValue
1278 SmallVectorImpl<SDValue> &InVals) const {
1279 SelectionDAG &DAG = CLI.DAG;
1280 SDLoc DL = CLI.DL;
1281 SDValue Chain = CLI.Chain;
1282 auto PtrVT = getPointerTy(DAG.getDataLayout());
1284
1285 // Analyze operands of the call, assigning locations to each operand.
1287 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1288 *DAG.getContext());
1289 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1290
1292 CCInfo, CLI, DAG.getMachineFunction());
1293
1294 // Get the size of the outgoing arguments stack space requirement.
1295 // The stack offset computed by CC_Sparc64 includes all arguments.
1296 // Called functions expect 6 argument words to exist in the stack frame, used
1297 // or not.
1298 unsigned StackReserved = 6 * 8u;
1299 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1300
1301 // Keep stack frames 16-byte aligned.
1302 ArgsSize = alignTo(ArgsSize, 16);
1303
1304 // Varargs calls require special treatment.
1305 if (CLI.IsVarArg)
1306 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1307
1308 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1309
1310 // Adjust the stack pointer to make room for the arguments.
1311 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1312 // with more than 6 arguments.
1313 if (!CLI.IsTailCall)
1314 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1315
1316 // Collect the set of registers to pass to the function and their values.
1317 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1318 // instruction.
1320
1321 // Collect chains from all the memory opeations that copy arguments to the
1322 // stack. They must follow the stack pointer adjustment above and precede the
1323 // call instruction itself.
1324 SmallVector<SDValue, 8> MemOpChains;
1325
1326 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1327 const CCValAssign &VA = ArgLocs[i];
1328 SDValue Arg = CLI.OutVals[i];
1329
1330 // Promote the value if needed.
1331 switch (VA.getLocInfo()) {
1332 default:
1333 llvm_unreachable("Unknown location info!");
1334 case CCValAssign::Full:
1335 break;
1336 case CCValAssign::SExt:
1337 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1338 break;
1339 case CCValAssign::ZExt:
1340 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1341 break;
1342 case CCValAssign::AExt:
1343 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1344 break;
1345 case CCValAssign::BCvt:
1346 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1347 // SPARC does not support i128 natively. Lower it into two i64, see below.
1348 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1349 || VA.getLocVT() != MVT::i128)
1350 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1351 break;
1352 }
1353
1354 if (VA.isRegLoc()) {
1355 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1356 && VA.getLocVT() == MVT::i128) {
1357 // Store and reload into the integer register reg and reg+1.
1358 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1359 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1360 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1361 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1362 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1363 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1364 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1365
1366 // Store to %sp+BIAS+128+Offset
1367 SDValue Store =
1368 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1369 // Load into Reg and Reg+1
1370 SDValue Hi64 =
1371 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1372 SDValue Lo64 =
1373 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1374
1375 Register HiReg = VA.getLocReg();
1376 Register LoReg = VA.getLocReg() + 1;
1377 if (!CLI.IsTailCall) {
1378 HiReg = toCallerWindow(HiReg);
1379 LoReg = toCallerWindow(LoReg);
1380 }
1381
1382 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1383 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1384 continue;
1385 }
1386
1387 // The custom bit on an i32 return value indicates that it should be
1388 // passed in the high bits of the register.
1389 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1390 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1391 DAG.getConstant(32, DL, MVT::i32));
1392
1393 // The next value may go in the low bits of the same register.
1394 // Handle both at once.
1395 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1396 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1397 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1398 CLI.OutVals[i+1]);
1399 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1400 // Skip the next value, it's already done.
1401 ++i;
1402 }
1403 }
1404
1405 Register Reg = VA.getLocReg();
1406 if (!CLI.IsTailCall)
1407 Reg = toCallerWindow(Reg);
1408 RegsToPass.push_back(std::make_pair(Reg, Arg));
1409 continue;
1410 }
1411
1412 assert(VA.isMemLoc());
1413
1414 // Create a store off the stack pointer for this argument.
1415 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1416 // The argument area starts at %fp+BIAS+128 in the callee frame,
1417 // %sp+BIAS+128 in ours.
1418 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1419 Subtarget->getStackPointerBias() +
1420 128, DL);
1421 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1422 MemOpChains.push_back(
1423 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1424 }
1425
1426 // Emit all stores, make sure they occur before the call.
1427 if (!MemOpChains.empty())
1428 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1429
1430 // Build a sequence of CopyToReg nodes glued together with token chain and
1431 // glue operands which copy the outgoing args into registers. The InGlue is
1432 // necessary since all emitted instructions must be stuck together in order
1433 // to pass the live physical registers.
1434 SDValue InGlue;
1435 for (const auto &[Reg, N] : RegsToPass) {
1436 Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
1437 InGlue = Chain.getValue(1);
1438 }
1439
1440 // If the callee is a GlobalAddress node (quite common, every direct call is)
1441 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1442 // Likewise ExternalSymbol -> TargetExternalSymbol.
1443 SDValue Callee = CLI.Callee;
1444 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1446 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
1448 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1449
1450 // Build the operands for the call instruction itself.
1452 Ops.push_back(Chain);
1453 Ops.push_back(Callee);
1454 for (const auto &[Reg, N] : RegsToPass)
1455 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1456
1457 // Add a register mask operand representing the call-preserved registers.
1458 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1459 const uint32_t *Mask =
1460 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1461 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1462 CLI.CallConv));
1463
1464 if (isAnyArgRegReserved(TRI, MF))
1466
1467 assert(Mask && "Missing call preserved mask for calling convention");
1468 Ops.push_back(DAG.getRegisterMask(Mask));
1469
1470 // Make sure the CopyToReg nodes are glued to the call instruction which
1471 // consumes the registers.
1472 if (InGlue.getNode())
1473 Ops.push_back(InGlue);
1474
1475 // Now the call itself.
1476 if (CLI.IsTailCall) {
1478 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1479 }
1480 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1481 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1482 InGlue = Chain.getValue(1);
1483
1484 // Revert the stack pointer immediately after the call.
1485 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1486 InGlue = Chain.getValue(1);
1487
1488 // Now extract the return values. This is more or less the same as
1489 // LowerFormalArguments_64.
1490
1491 // Assign locations to each value returned by this call.
1493 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1494 *DAG.getContext());
1495
1496 // Set inreg flag manually for codegen generated library calls that
1497 // return float.
1498 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1499 CLI.Ins[0].Flags.setInReg();
1500
1501 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1502
1503 // Copy all of the result registers out of their specified physreg.
1504 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1505 CCValAssign &VA = RVLocs[i];
1506 assert(VA.isRegLoc() && "Can only return in registers!");
1507 unsigned Reg = toCallerWindow(VA.getLocReg());
1508
1509 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1510 // reside in the same register in the high and low bits. Reuse the
1511 // CopyFromReg previous node to avoid duplicate copies.
1512 SDValue RV;
1513 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1514 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1515 RV = Chain.getValue(0);
1516
1517 // But usually we'll create a new CopyFromReg for a different register.
1518 if (!RV.getNode()) {
1519 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1520 Chain = RV.getValue(1);
1521 InGlue = Chain.getValue(2);
1522 }
1523
1524 // Get the high bits for i32 struct elements.
1525 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1526 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1527 DAG.getConstant(32, DL, MVT::i32));
1528
1529 // The callee promoted the return value, so insert an Assert?ext SDNode so
1530 // we won't promote the value again in this function.
1531 switch (VA.getLocInfo()) {
1532 case CCValAssign::SExt:
1533 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1534 DAG.getValueType(VA.getValVT()));
1535 break;
1536 case CCValAssign::ZExt:
1537 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1538 DAG.getValueType(VA.getValVT()));
1539 break;
1540 default:
1541 break;
1542 }
1543
1544 // Truncate the register down to the return value type.
1545 if (VA.isExtInLoc())
1546 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1547
1548 InVals.push_back(RV);
1549 }
1550
1551 return Chain;
1552}
1553
1554//===----------------------------------------------------------------------===//
1555// TargetLowering Implementation
1556//===----------------------------------------------------------------------===//
1557
1560 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1561 AI->getType()->getPrimitiveSizeInBits() == 32)
1562 return AtomicExpansionKind::None; // Uses xchg instruction
1563
1565}
1566
1567/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1568/// rcond condition.
1570 switch (CC) {
1571 default:
1572 llvm_unreachable("Unknown/unsigned integer condition code!");
1573 case ISD::SETEQ:
1574 return SPCC::REG_Z;
1575 case ISD::SETNE:
1576 return SPCC::REG_NZ;
1577 case ISD::SETLT:
1578 return SPCC::REG_LZ;
1579 case ISD::SETGT:
1580 return SPCC::REG_GZ;
1581 case ISD::SETLE:
1582 return SPCC::REG_LEZ;
1583 case ISD::SETGE:
1584 return SPCC::REG_GEZ;
1585 }
1586}
1587
1588/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1589/// condition.
1591 switch (CC) {
1592 default: llvm_unreachable("Unknown integer condition code!");
1593 case ISD::SETEQ: return SPCC::ICC_E;
1594 case ISD::SETNE: return SPCC::ICC_NE;
1595 case ISD::SETLT: return SPCC::ICC_L;
1596 case ISD::SETGT: return SPCC::ICC_G;
1597 case ISD::SETLE: return SPCC::ICC_LE;
1598 case ISD::SETGE: return SPCC::ICC_GE;
1599 case ISD::SETULT: return SPCC::ICC_CS;
1600 case ISD::SETULE: return SPCC::ICC_LEU;
1601 case ISD::SETUGT: return SPCC::ICC_GU;
1602 case ISD::SETUGE: return SPCC::ICC_CC;
1603 }
1604}
1605
1606/// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1607/// FCC condition.
1609 switch (CC) {
1610 default: llvm_unreachable("Unknown fp condition code!");
1611 case ISD::SETEQ:
1612 case ISD::SETOEQ: return SPCC::FCC_E;
1613 case ISD::SETNE:
1614 case ISD::SETUNE: return SPCC::FCC_NE;
1615 case ISD::SETLT:
1616 case ISD::SETOLT: return SPCC::FCC_L;
1617 case ISD::SETGT:
1618 case ISD::SETOGT: return SPCC::FCC_G;
1619 case ISD::SETLE:
1620 case ISD::SETOLE: return SPCC::FCC_LE;
1621 case ISD::SETGE:
1622 case ISD::SETOGE: return SPCC::FCC_GE;
1623 case ISD::SETULT: return SPCC::FCC_UL;
1624 case ISD::SETULE: return SPCC::FCC_ULE;
1625 case ISD::SETUGT: return SPCC::FCC_UG;
1626 case ISD::SETUGE: return SPCC::FCC_UGE;
1627 case ISD::SETUO: return SPCC::FCC_U;
1628 case ISD::SETO: return SPCC::FCC_O;
1629 case ISD::SETONE: return SPCC::FCC_LG;
1630 case ISD::SETUEQ: return SPCC::FCC_UE;
1631 }
1632}
1633
1635 const SparcSubtarget &STI)
1636 : TargetLowering(TM, STI), Subtarget(&STI) {
1637 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1638
1639 // Instructions which use registers as conditionals examine all the
1640 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1641 // matters much whether it's ZeroOrOneBooleanContent, or
1642 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1643 // former.
1646
1647 // Set up the register classes.
1648 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1649 if (!Subtarget->useSoftFloat()) {
1650 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1651 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1652 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1653 }
1654 if (Subtarget->is64Bit()) {
1655 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1656 } else {
1657 // On 32bit sparc, we define a double-register 32bit register
1658 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1659 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1660
1661 // ...but almost all operations must be expanded, so set that as
1662 // the default.
1663 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1664 setOperationAction(Op, MVT::v2i32, Expand);
1665 }
1666 // Truncating/extending stores/loads are also not supported.
1668 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1669 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1670 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1671
1672 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1673 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1674 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1675
1676 setTruncStoreAction(VT, MVT::v2i32, Expand);
1677 setTruncStoreAction(MVT::v2i32, VT, Expand);
1678 }
1679 // However, load and store *are* legal.
1680 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1681 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1684
1685 // And we need to promote i64 loads/stores into vector load/store
1688
1689 // Sadly, this doesn't work:
1690 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1691 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1692 }
1693
1694 // Turn FP extload into load/fpextend
1695 for (MVT VT : MVT::fp_valuetypes()) {
1696 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1697 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1698 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1699 }
1700
1701 // Sparc doesn't have i1 sign extending load
1702 for (MVT VT : MVT::integer_valuetypes())
1703 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1704
1705 // Turn FP truncstore into trunc + store.
1706 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1707 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1708 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1709 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1710 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1711 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1712
1713 // Custom legalize GlobalAddress nodes into LO/HI parts.
1718
1719 // Sparc doesn't have sext_inreg, replace them with shl/sra
1723
1724 // Sparc has no REM or DIVREM operations.
1729
1730 // ... nor does SparcV9.
1731 if (Subtarget->is64Bit()) {
1736 }
1737
1738 // Custom expand fp<->sint
1743
1744 // Custom Expand fp<->uint
1749
1750 // Lower f16 conversion operations into library calls
1757
1759 Subtarget->isVIS3() ? Legal : Expand);
1761 Subtarget->isVIS3() ? Legal : Expand);
1762
1763 // Sparc has no select or setcc: expand to SELECT_CC.
1768
1773
1774 // Sparc doesn't have BRCOND either, it has BR_CC.
1776 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1777 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1782
1787
1792
1793 if (Subtarget->isVIS3()) {
1796 }
1797
1798 if (Subtarget->is64Bit()) {
1800 Subtarget->isVIS3() ? Legal : Expand);
1802 Subtarget->isVIS3() ? Legal : Expand);
1807
1809 Subtarget->usePopc() ? Legal : Expand);
1811 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1812 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1814 }
1815
1816 // ATOMICs.
1817 // Atomics are supported on SparcV9. 32-bit atomics are also
1818 // supported by some Leon SparcV8 variants. Otherwise, atomics
1819 // are unsupported.
1820 if (Subtarget->isV9()) {
1821 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1822 // but it hasn't been implemented in the backend yet.
1823 if (Subtarget->is64Bit())
1825 else
1827 } else if (Subtarget->hasLeonCasa())
1829 else
1831
1833
1835
1837
1838 // Custom Lower Atomic LOAD/STORE
1841
1842 if (Subtarget->is64Bit()) {
1847 }
1848
1849 if (!Subtarget->isV9()) {
1850 // SparcV8 does not have FNEGD and FABSD.
1853 }
1854
1855 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1856 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1859 setOperationAction(ISD::FMA , MVT::f128, Expand);
1860 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1861 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1864 setOperationAction(ISD::FMA, MVT::f64,
1865 Subtarget->isUA2007() ? Legal : Expand);
1866 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1867 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1870 setOperationAction(ISD::FMA, MVT::f32,
1871 Subtarget->isUA2007() ? Legal : Expand);
1872 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1873 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1878 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1879 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1880 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1881
1885
1886 // Expands to [SU]MUL_LOHI.
1890
1891 if (Subtarget->useSoftMulDiv()) {
1892 // .umul works for both signed and unsigned
1897 }
1898
1899 if (Subtarget->is64Bit()) {
1903 Subtarget->isVIS3() ? Legal : Expand);
1905 Subtarget->isVIS3() ? Legal : Expand);
1906
1910 }
1911
1912 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1913 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1914 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1915 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1916
1917 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1919
1920 // Use the default implementation.
1921 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1922 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1927
1929
1931 Subtarget->usePopc() ? Legal : Expand);
1932
1933 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1934 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1935 setOperationAction(ISD::STORE, MVT::f128, Legal);
1936 } else {
1937 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1939 }
1940
1941 if (Subtarget->hasHardQuad()) {
1942 setOperationAction(ISD::FADD, MVT::f128, Legal);
1943 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1944 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1945 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1946 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1949 if (Subtarget->isV9()) {
1950 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1951 setOperationAction(ISD::FABS, MVT::f128, Legal);
1952 } else {
1953 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1954 setOperationAction(ISD::FABS, MVT::f128, Custom);
1955 }
1956 } else {
1957 // Custom legalize f128 operations.
1958
1959 setOperationAction(ISD::FADD, MVT::f128, Custom);
1960 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1961 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1962 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1964 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1965 setOperationAction(ISD::FABS, MVT::f128, Custom);
1966
1970 }
1971
1972 if (Subtarget->fixAllFDIVSQRT()) {
1973 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1974 // the former instructions generate errata on LEON processors.
1977 }
1978
1979 if (Subtarget->hasNoFMULS()) {
1981 }
1982
1983 // Custom combine bitcast between f64 and v2i32
1984 if (!Subtarget->is64Bit())
1986
1987 if (Subtarget->hasLeonCycleCounter())
1989
1990 if (Subtarget->isVIS3()) {
1995
1996 setOperationAction(ISD::CTTZ, MVT::i32,
1997 Subtarget->is64Bit() ? Promote : Expand);
2000 Subtarget->is64Bit() ? Promote : Expand);
2002 } else if (Subtarget->usePopc()) {
2007
2012 } else {
2016 Subtarget->is64Bit() ? Promote : LibCall);
2018
2019 // FIXME here we don't have any ISA extensions that could help us, so to
2020 // prevent large expansions those should be made into LibCalls.
2025 }
2026
2028
2029 // Some processors have no branch predictor and have pipelines longer than
2030 // what can be covered by the delay slot. This results in a stall, so mark
2031 // branches to be expensive on those processors.
2032 setJumpIsExpensive(Subtarget->hasNoPredictor());
2033 // The high cost of branching means that using conditional moves will
2034 // still be profitable even if the condition is predictable.
2036
2038
2039 computeRegisterProperties(Subtarget->getRegisterInfo());
2040}
2041
2043 return Subtarget->useSoftFloat();
2044}
2045
2047 EVT VT) const {
2048 if (!VT.isVector())
2049 return MVT::i32;
2051}
2052
2053/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
2054/// be zero. Op is expected to be a target specific node. Used by DAG
2055/// combiner.
2057 (const SDValue Op,
2058 KnownBits &Known,
2059 const APInt &DemandedElts,
2060 const SelectionDAG &DAG,
2061 unsigned Depth) const {
2062 KnownBits Known2;
2063 Known.resetAll();
2064
2065 switch (Op.getOpcode()) {
2066 default: break;
2067 case SPISD::SELECT_ICC:
2068 case SPISD::SELECT_XCC:
2069 case SPISD::SELECT_FCC:
2070 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2071 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2072
2073 // Only known if known in both the LHS and RHS.
2074 Known = Known.intersectWith(Known2);
2075 break;
2076 }
2077}
2078
2079// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2080// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2082 ISD::CondCode CC, unsigned &SPCC) {
2083 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2084 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2085 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2086 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2087 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2088 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2089 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2090 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2091 SDValue CMPCC = LHS.getOperand(3);
2092 SPCC = LHS.getConstantOperandVal(2);
2093 LHS = CMPCC.getOperand(0);
2094 RHS = CMPCC.getOperand(1);
2095 }
2096}
2097
2098// Convert to a target node and set target flags.
2100 SelectionDAG &DAG) const {
2102 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2103 SDLoc(GA),
2104 GA->getValueType(0),
2105 GA->getOffset(), TF);
2106
2108 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2109 CP->getAlign(), CP->getOffset(), TF);
2110
2112 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2113 Op.getValueType(),
2114 0,
2115 TF);
2116
2118 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2119 ES->getValueType(0), TF);
2120
2121 llvm_unreachable("Unhandled address SDNode");
2122}
2123
2124// Split Op into high and low parts according to HiTF and LoTF.
2125// Return an ADD node combining the parts.
2127 unsigned HiTF, unsigned LoTF,
2128 SelectionDAG &DAG) const {
2129 SDLoc DL(Op);
2130 EVT VT = Op.getValueType();
2131 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2132 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2133 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2134}
2135
2136// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2137// or ExternalSymbol SDNode.
2139 SDLoc DL(Op);
2140 EVT VT = getPointerTy(DAG.getDataLayout());
2141
2142 // Handle PIC mode first. SPARC needs a got load for every variable!
2143 if (isPositionIndependent()) {
2144 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2145 PICLevel::Level picLevel = M->getPICLevel();
2146 SDValue Idx;
2147
2148 if (picLevel == PICLevel::SmallPIC) {
2149 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2150 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2151 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2152 } else {
2153 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2154 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2155 }
2156
2157 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2158 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2159 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2160 // function has calls.
2162 MFI.setHasCalls(true);
2163 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2165 }
2166
2167 // This is one of the absolute code models.
2168 switch(getTargetMachine().getCodeModel()) {
2169 default:
2170 llvm_unreachable("Unsupported absolute code model");
2171 case CodeModel::Small:
2172 // abs32.
2173 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2174 case CodeModel::Medium: {
2175 // abs44.
2176 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2177 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2178 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2179 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2180 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2181 }
2182 case CodeModel::Large: {
2183 // abs64.
2184 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2185 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2186 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2187 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2188 }
2189 }
2190}
2191
2196
2201
2206
2208 SelectionDAG &DAG) const {
2209
2211 if (DAG.getTarget().useEmulatedTLS())
2212 return LowerToTLSEmulatedModel(GA, DAG);
2213
2214 SDLoc DL(GA);
2215 const GlobalValue *GV = GA->getGlobal();
2216 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2217
2219
2220 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2221 unsigned HiTF =
2222 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2223 : ELF::R_SPARC_TLS_LDM_HI22);
2224 unsigned LoTF =
2225 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2226 : ELF::R_SPARC_TLS_LDM_LO10);
2227 unsigned addTF =
2228 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2229 : ELF::R_SPARC_TLS_LDM_ADD);
2230 unsigned callTF =
2231 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2232 : ELF::R_SPARC_TLS_LDM_CALL);
2233
2234 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2235 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2236 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2237 withTargetFlags(Op, addTF, DAG));
2238
2239 SDValue Chain = DAG.getEntryNode();
2240 SDValue InGlue;
2241
2242 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2243 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2244 InGlue = Chain.getValue(1);
2245 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2246 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2247
2248 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2249 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2251 assert(Mask && "Missing call preserved mask for calling convention");
2252 SDValue Ops[] = {Chain,
2253 Callee,
2254 Symbol,
2255 DAG.getRegister(SP::O0, PtrVT),
2256 DAG.getRegisterMask(Mask),
2257 InGlue};
2258 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2259 InGlue = Chain.getValue(1);
2260 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2261 InGlue = Chain.getValue(1);
2262 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2263
2264 if (model != TLSModel::LocalDynamic)
2265 return Ret;
2266
2267 SDValue Hi =
2268 DAG.getNode(SPISD::Hi, DL, PtrVT,
2269 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2270 SDValue Lo =
2271 DAG.getNode(SPISD::Lo, DL, PtrVT,
2272 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2273 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2274 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2275 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2276 }
2277
2278 if (model == TLSModel::InitialExec) {
2279 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2280 : ELF::R_SPARC_TLS_IE_LD);
2281
2282 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2283
2284 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2285 // function has calls.
2287 MFI.setHasCalls(true);
2288
2289 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2290 ELF::R_SPARC_TLS_IE_LO10, DAG);
2291 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2292 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2293 DL, PtrVT, Ptr,
2294 withTargetFlags(Op, ldTF, DAG));
2295 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2296 DAG.getRegister(SP::G7, PtrVT), Offset,
2297 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2298 }
2299
2300 assert(model == TLSModel::LocalExec);
2301 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2302 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2303 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2304 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2305 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2306
2307 return DAG.getNode(ISD::ADD, DL, PtrVT,
2308 DAG.getRegister(SP::G7, PtrVT), Offset);
2309}
2310
2312 ArgListTy &Args, SDValue Arg,
2313 const SDLoc &DL,
2314 SelectionDAG &DAG) const {
2316 EVT ArgVT = Arg.getValueType();
2317 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2318
2319 if (ArgTy->isFP128Ty()) {
2320 // Create a stack object and pass the pointer to the library function.
2321 int FI = MFI.CreateStackObject(16, Align(8), false);
2322 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2323 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2324 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2325 } else {
2326 Args.emplace_back(Arg, ArgTy);
2327 }
2328 return Chain;
2329}
2330
2332 RTLIB::Libcall LibFunc,
2333 unsigned numArgs) const {
2334 RTLIB::LibcallImpl LibFuncImpl = DAG.getLibcalls().getLibcallImpl(LibFunc);
2335 if (LibFuncImpl == RTLIB::Unsupported)
2336 return SDValue();
2337
2338 ArgListTy Args;
2339
2341 auto PtrVT = getPointerTy(DAG.getDataLayout());
2342
2343 SDValue Callee = DAG.getExternalSymbol(LibFuncImpl, PtrVT);
2344 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2345 Type *RetTyABI = RetTy;
2346 SDValue Chain = DAG.getEntryNode();
2347 SDValue RetPtr;
2348
2349 if (RetTy->isFP128Ty()) {
2350 // Create a Stack Object to receive the return value of type f128.
2351 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2352 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2353 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2354 if (!Subtarget->is64Bit()) {
2355 Entry.IsSRet = true;
2356 Entry.IndirectType = RetTy;
2357 }
2358 Entry.IsReturned = false;
2359 Args.push_back(Entry);
2360 RetTyABI = Type::getVoidTy(*DAG.getContext());
2361 }
2362
2363 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2364 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2365 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2366 }
2367
2370 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain).setCallee(CC, RetTyABI, Callee,
2371 std::move(Args));
2372
2373 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2374
2375 // chain is in second result.
2376 if (RetTyABI == RetTy)
2377 return CallInfo.first;
2378
2379 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2380
2381 Chain = CallInfo.second;
2382
2383 // Load RetPtr to get the return value.
2384 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2386}
2387
2389 unsigned &SPCC, const SDLoc &DL,
2390 SelectionDAG &DAG) const {
2391
2392 const char *LibCall = nullptr;
2393 bool is64Bit = Subtarget->is64Bit();
2394 switch(SPCC) {
2395 default: llvm_unreachable("Unhandled conditional code!");
2396 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2397 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2398 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2399 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2400 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2401 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2402 case SPCC::FCC_UL :
2403 case SPCC::FCC_ULE:
2404 case SPCC::FCC_UG :
2405 case SPCC::FCC_UGE:
2406 case SPCC::FCC_U :
2407 case SPCC::FCC_O :
2408 case SPCC::FCC_LG :
2409 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2410 }
2411
2412 auto PtrVT = getPointerTy(DAG.getDataLayout());
2413 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2414 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2415 ArgListTy Args;
2416 SDValue Chain = DAG.getEntryNode();
2417 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2418 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2419
2421 CLI.setDebugLoc(DL).setChain(Chain)
2422 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2423
2424 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2425
2426 // result is in first, and chain is in second result.
2427 SDValue Result = CallInfo.first;
2428
2429 switch(SPCC) {
2430 default: {
2431 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2433 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2434 }
2435 case SPCC::FCC_UL : {
2436 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2437 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2438 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2440 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2441 }
2442 case SPCC::FCC_ULE: {
2443 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2445 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2446 }
2447 case SPCC::FCC_UG : {
2448 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2449 SPCC = SPCC::ICC_G;
2450 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2451 }
2452 case SPCC::FCC_UGE: {
2453 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2455 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2456 }
2457
2458 case SPCC::FCC_U : {
2459 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2460 SPCC = SPCC::ICC_E;
2461 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2462 }
2463 case SPCC::FCC_O : {
2464 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2466 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2467 }
2468 case SPCC::FCC_LG : {
2469 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2470 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2471 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2473 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2474 }
2475 case SPCC::FCC_UE : {
2476 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2477 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2478 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2479 SPCC = SPCC::ICC_E;
2480 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2481 }
2482 }
2483}
2484
2485static SDValue
2487 const SparcTargetLowering &TLI) {
2488
2489 if (Op.getOperand(0).getValueType() == MVT::f64)
2490 return TLI.LowerF128Op(Op, DAG, RTLIB::FPEXT_F64_F128, 1);
2491
2492 if (Op.getOperand(0).getValueType() == MVT::f32)
2493 return TLI.LowerF128Op(Op, DAG, RTLIB::FPEXT_F32_F128, 1);
2494
2495 llvm_unreachable("fpextend with non-float operand!");
2496 return SDValue();
2497}
2498
2499static SDValue
2501 const SparcTargetLowering &TLI) {
2502 // FP_ROUND on f64 and f32 are legal.
2503 if (Op.getOperand(0).getValueType() != MVT::f128)
2504 return Op;
2505
2506 if (Op.getValueType() == MVT::f64)
2507 return TLI.LowerF128Op(Op, DAG, RTLIB::FPROUND_F128_F64, 1);
2508 if (Op.getValueType() == MVT::f32)
2509 return TLI.LowerF128Op(Op, DAG, RTLIB::FPROUND_F128_F32, 1);
2510
2511 llvm_unreachable("fpround to non-float!");
2512 return SDValue();
2513}
2514
2516 const SparcTargetLowering &TLI,
2517 bool hasHardQuad) {
2518 SDLoc dl(Op);
2519 EVT VT = Op.getValueType();
2520 assert(VT == MVT::i32 || VT == MVT::i64);
2521
2522 // Expand f128 operations to fp128 abi calls.
2523 if (Op.getOperand(0).getValueType() == MVT::f128
2524 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2525 RTLIB::Libcall LibFunc =
2526 VT == MVT::i32 ? RTLIB::FPTOSINT_F128_I32 : RTLIB::FPTOSINT_F128_I64;
2527 return TLI.LowerF128Op(Op, DAG, LibFunc, 1);
2528 }
2529
2530 // Expand if the resulting type is illegal.
2531 if (!TLI.isTypeLegal(VT))
2532 return SDValue();
2533
2534 // Otherwise, Convert the fp value to integer in an FP register.
2535 if (VT == MVT::i32)
2536 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2537 else
2538 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2539
2540 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2541}
2542
2544 const SparcTargetLowering &TLI,
2545 bool hasHardQuad) {
2546 SDLoc dl(Op);
2547 EVT OpVT = Op.getOperand(0).getValueType();
2548 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2549
2550 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2551
2552 // Expand f128 operations to fp128 ABI calls.
2553 if (Op.getValueType() == MVT::f128
2554 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2555 RTLIB::Libcall LibFunc =
2556 OpVT == MVT::i32 ? RTLIB::SINTTOFP_I32_F128 : RTLIB::SINTTOFP_I64_F128;
2557 return TLI.LowerF128Op(Op, DAG, LibFunc, 1);
2558 }
2559
2560 // Expand if the operand type is illegal.
2561 if (!TLI.isTypeLegal(OpVT))
2562 return SDValue();
2563
2564 // Otherwise, Convert the int value to FP in an FP register.
2565 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2566 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2567 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2568}
2569
2571 const SparcTargetLowering &TLI,
2572 bool hasHardQuad) {
2573 EVT VT = Op.getValueType();
2574
2575 // Expand if it does not involve f128 or the target has support for
2576 // quad floating point instructions and the resulting type is legal.
2577 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2578 (hasHardQuad && TLI.isTypeLegal(VT)))
2579 return SDValue();
2580
2581 assert(VT == MVT::i32 || VT == MVT::i64);
2582
2583 return TLI.LowerF128Op(
2584 Op, DAG,
2585 VT == MVT::i32 ? RTLIB::FPTOUINT_F128_I32 : RTLIB::FPTOUINT_F128_I64, 1);
2586}
2587
2589 const SparcTargetLowering &TLI,
2590 bool hasHardQuad) {
2591 EVT OpVT = Op.getOperand(0).getValueType();
2592 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2593
2594 // Expand if it does not involve f128 or the target has support for
2595 // quad floating point instructions and the operand type is legal.
2596 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2597 return SDValue();
2598
2599 return TLI.LowerF128Op(Op, DAG,
2600 OpVT == MVT::i32 ? RTLIB::UINTTOFP_I32_F128
2601 : RTLIB::UINTTOFP_I64_F128,
2602 1);
2603}
2604
2606 const SparcTargetLowering &TLI, bool hasHardQuad,
2607 bool isV9, bool is64Bit) {
2608 SDValue Chain = Op.getOperand(0);
2609 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2610 SDValue LHS = Op.getOperand(2);
2611 SDValue RHS = Op.getOperand(3);
2612 SDValue Dest = Op.getOperand(4);
2613 SDLoc dl(Op);
2614 unsigned Opc, SPCC = ~0U;
2615
2616 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2617 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2619 assert(LHS.getValueType() == RHS.getValueType());
2620
2621 // Get the condition flag.
2622 SDValue CompareFlag;
2623 if (LHS.getValueType().isInteger()) {
2624 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2625 // and the RHS is zero we might be able to use a specialized branch.
2626 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2628 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2629 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2630 LHS);
2631
2632 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2633 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2634 if (isV9)
2635 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2636 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2637 else
2638 // Non-v9 targets don't have xcc.
2639 Opc = SPISD::BRICC;
2640 } else {
2641 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2642 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2643 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2644 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2645 } else {
2646 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2647 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2648 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2649 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2650 }
2651 }
2652 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2653 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2654}
2655
2657 const SparcTargetLowering &TLI, bool hasHardQuad,
2658 bool isV9, bool is64Bit) {
2659 SDValue LHS = Op.getOperand(0);
2660 SDValue RHS = Op.getOperand(1);
2661 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2662 SDValue TrueVal = Op.getOperand(2);
2663 SDValue FalseVal = Op.getOperand(3);
2664 SDLoc dl(Op);
2665 unsigned Opc, SPCC = ~0U;
2666
2667 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2668 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2670 assert(LHS.getValueType() == RHS.getValueType());
2671
2672 SDValue CompareFlag;
2673 if (LHS.getValueType().isInteger()) {
2674 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2675 // and the RHS is zero we might be able to use a specialized select.
2676 // All SELECT_CC between any two scalar integer types are eligible for
2677 // lowering to specialized instructions. Additionally, f32 and f64 types
2678 // are also eligible, but for f128 we can only use the specialized
2679 // instruction when we have hardquad.
2680 EVT ValType = TrueVal.getValueType();
2681 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2682 ValType == MVT::f64 ||
2683 (ValType == MVT::f128 && hasHardQuad);
2684 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2685 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2686 return DAG.getNode(
2687 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2688 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2689
2690 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2691 Opc = LHS.getValueType() == MVT::i32 ?
2692 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2693 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2694 } else {
2695 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2696 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2697 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2698 Opc = SPISD::SELECT_ICC;
2699 } else {
2700 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2701 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2702 Opc = SPISD::SELECT_FCC;
2703 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2704 }
2705 }
2706 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2707 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2708}
2709
2711 const SparcTargetLowering &TLI) {
2714 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2715
2716 // Need frame address to find the address of VarArgsFrameIndex.
2718
2719 // vastart just stores the address of the VarArgsFrameIndex slot into the
2720 // memory location argument.
2721 SDLoc DL(Op);
2722 SDValue Offset =
2723 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2724 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2725 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2726 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2727 MachinePointerInfo(SV));
2728}
2729
2731 SDNode *Node = Op.getNode();
2732 EVT VT = Node->getValueType(0);
2733 SDValue InChain = Node->getOperand(0);
2734 SDValue VAListPtr = Node->getOperand(1);
2735 EVT PtrVT = VAListPtr.getValueType();
2736 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2737 SDLoc DL(Node);
2738 SDValue VAList =
2739 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2740 // Increment the pointer, VAList, to the next vaarg.
2741 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2743 DL));
2744 // Store the incremented VAList to the legalized pointer.
2745 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2746 MachinePointerInfo(SV));
2747 // Load the actual argument out of the pointer VAList.
2748 // We can't count on greater alignment than the word size.
2749 return DAG.getLoad(
2750 VT, DL, InChain, VAList, MachinePointerInfo(),
2751 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2752}
2753
2755 const SparcSubtarget &Subtarget) {
2756 SDValue Chain = Op.getOperand(0);
2757 EVT VT = Op->getValueType(0);
2758 SDLoc DL(Op);
2759
2760 MCRegister SPReg = SP::O6;
2761 SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
2762
2763 // Unbias the stack pointer register.
2764 unsigned OffsetToStackStart = Subtarget.getStackPointerBias();
2765 // Move past the register save area: 8 in registers + 8 local registers.
2766 OffsetToStackStart += 16 * (Subtarget.is64Bit() ? 8 : 4);
2767 // Move past the struct return address slot (4 bytes) on SPARC 32-bit.
2768 if (!Subtarget.is64Bit())
2769 OffsetToStackStart += 4;
2770
2771 SDValue StackAddr = DAG.getNode(ISD::ADD, DL, VT, SP,
2772 DAG.getConstant(OffsetToStackStart, DL, VT));
2773 return DAG.getMergeValues({StackAddr, Chain}, DL);
2774}
2775
2777 const SparcSubtarget *Subtarget) {
2778 SDValue Chain = Op.getOperand(0);
2779 SDValue Size = Op.getOperand(1);
2780 SDValue Alignment = Op.getOperand(2);
2781 MaybeAlign MaybeAlignment =
2782 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2783 EVT VT = Size->getValueType(0);
2784 SDLoc dl(Op);
2785
2786 unsigned SPReg = SP::O6;
2787 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2788
2789 // The resultant pointer needs to be above the register spill area
2790 // at the bottom of the stack.
2791 unsigned regSpillArea;
2792 if (Subtarget->is64Bit()) {
2793 regSpillArea = 128;
2794 } else {
2795 // On Sparc32, the size of the spill area is 92. Unfortunately,
2796 // that's only 4-byte aligned, not 8-byte aligned (the stack
2797 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2798 // aligned dynamic allocation, we actually need to add 96 to the
2799 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2800
2801 // That also means adding 4 to the size of the allocation --
2802 // before applying the 8-byte rounding. Unfortunately, we the
2803 // value we get here has already had rounding applied. So, we need
2804 // to add 8, instead, wasting a bit more memory.
2805
2806 // Further, this only actually needs to be done if the required
2807 // alignment is > 4, but, we've lost that info by this point, too,
2808 // so we always apply it.
2809
2810 // (An alternative approach would be to always reserve 96 bytes
2811 // instead of the required 92, but then we'd waste 4 extra bytes
2812 // in every frame, not just those with dynamic stack allocations)
2813
2814 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2815
2816 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2817 DAG.getConstant(8, dl, VT));
2818 regSpillArea = 96;
2819 }
2820
2821 int64_t Bias = Subtarget->getStackPointerBias();
2822
2823 // Debias and increment SP past the reserved spill area.
2824 // We need the SP to point to the first usable region before calculating
2825 // anything to prevent any of the pointers from becoming out of alignment when
2826 // we rebias the SP later on.
2827 SDValue StartOfUsableStack = DAG.getNode(
2828 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2829 SDValue AllocatedPtr =
2830 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2831
2832 bool IsOveraligned = MaybeAlignment.has_value();
2833 SDValue AlignedPtr =
2834 IsOveraligned
2835 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2836 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2837 : AllocatedPtr;
2838
2839 // Now that we are done, restore the bias and reserved spill area.
2840 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2841 DAG.getConstant(regSpillArea + Bias, dl, VT));
2842 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2843 SDValue Ops[2] = {AlignedPtr, Chain};
2844 return DAG.getMergeValues(Ops, dl);
2845}
2846
2847
2849 SDLoc dl(Op);
2850 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2851 dl, MVT::Other, DAG.getEntryNode());
2852 return Chain;
2853}
2854
2856 const SparcSubtarget *Subtarget,
2857 bool AlwaysFlush = false) {
2859 MFI.setFrameAddressIsTaken(true);
2860
2861 EVT VT = Op.getValueType();
2862 SDLoc dl(Op);
2863 unsigned FrameReg = SP::I6;
2864 unsigned stackBias = Subtarget->getStackPointerBias();
2865
2866 SDValue FrameAddr;
2867 SDValue Chain;
2868
2869 // flush first to make sure the windowed registers' values are in stack
2870 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2871
2872 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2873
2874 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2875
2876 while (depth--) {
2877 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2878 DAG.getIntPtrConstant(Offset, dl));
2879 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2880 }
2881 if (Subtarget->is64Bit())
2882 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2883 DAG.getIntPtrConstant(stackBias, dl));
2884 return FrameAddr;
2885}
2886
2887
2889 const SparcSubtarget *Subtarget) {
2890
2891 uint64_t depth = Op.getConstantOperandVal(0);
2892
2893 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2894
2895}
2896
2898 const SparcTargetLowering &TLI,
2899 const SparcSubtarget *Subtarget) {
2901 MachineFrameInfo &MFI = MF.getFrameInfo();
2902 MFI.setReturnAddressIsTaken(true);
2903
2904 EVT VT = Op.getValueType();
2905 SDLoc dl(Op);
2906 uint64_t depth = Op.getConstantOperandVal(0);
2907
2908 SDValue RetAddr;
2909 if (depth == 0) {
2910 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2911 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2912 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2913 return RetAddr;
2914 }
2915
2916 // Need frame address to find return address of the caller.
2917 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2918
2919 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2920 SDValue Ptr = DAG.getNode(ISD::ADD,
2921 dl, VT,
2922 FrameAddr,
2923 DAG.getIntPtrConstant(Offset, dl));
2924 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2925
2926 return RetAddr;
2927}
2928
2929static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2930 unsigned opcode) {
2931 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2932 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2933
2934 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2935 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2936 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2937
2938 // Note: in little-endian, the floating-point value is stored in the
2939 // registers are in the opposite order, so the subreg with the sign
2940 // bit is the highest-numbered (odd), rather than the
2941 // lowest-numbered (even).
2942
2943 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2944 SrcReg64);
2945 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2946 SrcReg64);
2947
2948 if (DAG.getDataLayout().isLittleEndian())
2949 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2950 else
2951 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2952
2953 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2954 dl, MVT::f64), 0);
2955 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2956 DstReg64, Hi32);
2957 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2958 DstReg64, Lo32);
2959 return DstReg64;
2960}
2961
2962// Lower a f128 load into two f64 loads.
2964{
2965 SDLoc dl(Op);
2966 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2967 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2968
2969 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2970
2971 SDValue Hi64 =
2972 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2973 LdNode->getPointerInfo(), Alignment);
2974 EVT addrVT = LdNode->getBasePtr().getValueType();
2975 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2976 LdNode->getBasePtr(),
2977 DAG.getConstant(8, dl, addrVT));
2978 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2979 LdNode->getPointerInfo().getWithOffset(8),
2980 Alignment);
2981
2982 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2983 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2984
2985 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2986 dl, MVT::f128);
2987 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2988 MVT::f128,
2989 SDValue(InFP128, 0),
2990 Hi64,
2991 SubRegEven);
2992 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2993 MVT::f128,
2994 SDValue(InFP128, 0),
2995 Lo64,
2996 SubRegOdd);
2997 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2998 SDValue(Lo64.getNode(), 1) };
2999 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3000 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
3001 return DAG.getMergeValues(Ops, dl);
3002}
3003
3005{
3006 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
3007
3008 EVT MemVT = LdNode->getMemoryVT();
3009 if (MemVT == MVT::f128)
3010 return LowerF128Load(Op, DAG);
3011
3012 return Op;
3013}
3014
3015// Lower a f128 store into two f64 stores.
3017 SDLoc dl(Op);
3018 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
3019 assert(StNode->getOffset().isUndef() && "Unexpected node type");
3020
3021 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
3022 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
3023
3024 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3025 dl,
3026 MVT::f64,
3027 StNode->getValue(),
3028 SubRegEven);
3029 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3030 dl,
3031 MVT::f64,
3032 StNode->getValue(),
3033 SubRegOdd);
3034
3035 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
3036
3037 SDValue OutChains[2];
3038 OutChains[0] =
3039 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
3040 StNode->getBasePtr(), StNode->getPointerInfo(),
3041 Alignment);
3042 EVT addrVT = StNode->getBasePtr().getValueType();
3043 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3044 StNode->getBasePtr(),
3045 DAG.getConstant(8, dl, addrVT));
3046 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3047 StNode->getPointerInfo().getWithOffset(8),
3048 Alignment);
3049 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3050}
3051
3053{
3054 SDLoc dl(Op);
3055 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3056
3057 EVT MemVT = St->getMemoryVT();
3058 if (MemVT == MVT::f128)
3059 return LowerF128Store(Op, DAG);
3060
3061 if (MemVT == MVT::i64) {
3062 // Custom handling for i64 stores: turn it into a bitcast and a
3063 // v2i32 store.
3064 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3065 SDValue Chain = DAG.getStore(
3066 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3067 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3068 return Chain;
3069 }
3070
3071 return SDValue();
3072}
3073
3075 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3076 && "invalid opcode");
3077
3078 SDLoc dl(Op);
3079
3080 if (Op.getValueType() == MVT::f64)
3081 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3082 if (Op.getValueType() != MVT::f128)
3083 return Op;
3084
3085 // Lower fabs/fneg on f128 to fabs/fneg on f64
3086 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3087 // (As with LowerF64Op, on little-endian, we need to negate the odd
3088 // subreg)
3089
3090 SDValue SrcReg128 = Op.getOperand(0);
3091 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3092 SrcReg128);
3093 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3094 SrcReg128);
3095
3096 if (DAG.getDataLayout().isLittleEndian()) {
3097 if (isV9)
3098 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3099 else
3100 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3101 } else {
3102 if (isV9)
3103 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3104 else
3105 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3106 }
3107
3108 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3109 dl, MVT::f128), 0);
3110 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3111 DstReg128, Hi64);
3112 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3113 DstReg128, Lo64);
3114 return DstReg128;
3115}
3116
3118 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3119 // Expand with a fence.
3120 return SDValue();
3121 }
3122
3123 // Monotonic load/stores are legal.
3124 return Op;
3125}
3126
3128 SelectionDAG &DAG) const {
3129 unsigned IntNo = Op.getConstantOperandVal(0);
3130 switch (IntNo) {
3131 default: return SDValue(); // Don't custom lower most intrinsics.
3132 case Intrinsic::thread_pointer: {
3133 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3134 return DAG.getRegister(SP::G7, PtrVT);
3135 }
3136 }
3137}
3138
3141
3142 bool hasHardQuad = Subtarget->hasHardQuad();
3143 bool isV9 = Subtarget->isV9();
3144 bool is64Bit = Subtarget->is64Bit();
3145
3146 switch (Op.getOpcode()) {
3147 default: llvm_unreachable("Should not custom lower this!");
3148
3149 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3150 Subtarget);
3151 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3152 Subtarget);
3154 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3155 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3156 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3157 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3158 hasHardQuad);
3159 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3160 hasHardQuad);
3161 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3162 hasHardQuad);
3163 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3164 hasHardQuad);
3165 case ISD::BR_CC:
3166 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3167 case ISD::SELECT_CC:
3168 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3169 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3170 case ISD::VAARG: return LowerVAARG(Op, DAG);
3172 Subtarget);
3173 case ISD::STACKADDRESS:
3174 return LowerSTACKADDRESS(Op, DAG, *Subtarget);
3175
3176 case ISD::LOAD: return LowerLOAD(Op, DAG);
3177 case ISD::STORE: return LowerSTORE(Op, DAG);
3178 case ISD::FADD:
3179 return LowerF128Op(Op, DAG, RTLIB::ADD_F128, 2);
3180 case ISD::FSUB:
3181 return LowerF128Op(Op, DAG, RTLIB::SUB_F128, 2);
3182 case ISD::FMUL:
3183 return LowerF128Op(Op, DAG, RTLIB::MUL_F128, 2);
3184 case ISD::FDIV:
3185 return LowerF128Op(Op, DAG, RTLIB::DIV_F128, 2);
3186 case ISD::FSQRT:
3187 return LowerF128Op(Op, DAG, RTLIB::SQRT_F128, 1);
3188 case ISD::FABS:
3189 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3190 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3191 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3192 case ISD::ATOMIC_LOAD:
3193 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3195 }
3196}
3197
3199 const SDLoc &DL,
3200 SelectionDAG &DAG) const {
3201 APInt V = C->getValueAPF().bitcastToAPInt();
3202 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3203 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3204 if (DAG.getDataLayout().isLittleEndian())
3205 std::swap(Lo, Hi);
3206 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3207}
3208
3210 DAGCombinerInfo &DCI) const {
3211 SDLoc dl(N);
3212 SDValue Src = N->getOperand(0);
3213
3214 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3215 Src.getSimpleValueType() == MVT::f64)
3217
3218 return SDValue();
3219}
3220
3222 DAGCombinerInfo &DCI) const {
3223 switch (N->getOpcode()) {
3224 default:
3225 break;
3226 case ISD::BITCAST:
3227 return PerformBITCASTCombine(N, DCI);
3228 }
3229 return SDValue();
3230}
3231
3234 MachineBasicBlock *BB) const {
3235 switch (MI.getOpcode()) {
3236 default: llvm_unreachable("Unknown SELECT_CC!");
3237 case SP::SELECT_CC_Int_ICC:
3238 case SP::SELECT_CC_FP_ICC:
3239 case SP::SELECT_CC_DFP_ICC:
3240 case SP::SELECT_CC_QFP_ICC:
3241 if (Subtarget->isV9())
3242 return expandSelectCC(MI, BB, SP::BPICC);
3243 return expandSelectCC(MI, BB, SP::BCOND);
3244 case SP::SELECT_CC_Int_XCC:
3245 case SP::SELECT_CC_FP_XCC:
3246 case SP::SELECT_CC_DFP_XCC:
3247 case SP::SELECT_CC_QFP_XCC:
3248 return expandSelectCC(MI, BB, SP::BPXCC);
3249 case SP::SELECT_CC_Int_FCC:
3250 case SP::SELECT_CC_FP_FCC:
3251 case SP::SELECT_CC_DFP_FCC:
3252 case SP::SELECT_CC_QFP_FCC:
3253 if (Subtarget->isV9())
3254 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3255 return expandSelectCC(MI, BB, SP::FBCOND);
3256 }
3257}
3258
3261 unsigned BROpcode) const {
3262 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3263 DebugLoc dl = MI.getDebugLoc();
3264 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3265
3266 // To "insert" a SELECT_CC instruction, we actually have to insert the
3267 // triangle control-flow pattern. The incoming instruction knows the
3268 // destination vreg to set, the condition code register to branch on, the
3269 // true/false values to select between, and the condition code for the branch.
3270 //
3271 // We produce the following control flow:
3272 // ThisMBB
3273 // | \
3274 // | IfFalseMBB
3275 // | /
3276 // SinkMBB
3277 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3279
3280 MachineBasicBlock *ThisMBB = BB;
3281 MachineFunction *F = BB->getParent();
3282 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3283 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3284 F->insert(It, IfFalseMBB);
3285 F->insert(It, SinkMBB);
3286
3287 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3288 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3289 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3290 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3291
3292 // Set the new successors for ThisMBB.
3293 ThisMBB->addSuccessor(IfFalseMBB);
3294 ThisMBB->addSuccessor(SinkMBB);
3295
3296 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3297 .addMBB(SinkMBB)
3298 .addImm(CC);
3299
3300 // IfFalseMBB just falls through to SinkMBB.
3301 IfFalseMBB->addSuccessor(SinkMBB);
3302
3303 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3304 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3305 MI.getOperand(0).getReg())
3306 .addReg(MI.getOperand(1).getReg())
3307 .addMBB(ThisMBB)
3308 .addReg(MI.getOperand(2).getReg())
3309 .addMBB(IfFalseMBB);
3310
3311 MI.eraseFromParent(); // The pseudo instruction is gone now.
3312 return SinkMBB;
3313}
3314
3315//===----------------------------------------------------------------------===//
3316// Sparc Inline Assembly Support
3317//===----------------------------------------------------------------------===//
3318
3319/// getConstraintType - Given a constraint letter, return the type of
3320/// constraint it is for this target.
3323 if (Constraint.size() == 1) {
3324 switch (Constraint[0]) {
3325 default: break;
3326 case 'r':
3327 case 'f':
3328 case 'e':
3329 return C_RegisterClass;
3330 case 'I': // SIMM13
3331 return C_Immediate;
3332 }
3333 }
3334
3335 return TargetLowering::getConstraintType(Constraint);
3336}
3337
3340 const char *constraint) const {
3342 Value *CallOperandVal = info.CallOperandVal;
3343 // If we don't have a value, we can't do a match,
3344 // but allow it at the lowest weight.
3345 if (!CallOperandVal)
3346 return CW_Default;
3347
3348 // Look at the constraint type.
3349 switch (*constraint) {
3350 default:
3352 break;
3353 case 'I': // SIMM13
3354 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3355 if (isInt<13>(C->getSExtValue()))
3356 weight = CW_Constant;
3357 }
3358 break;
3359 }
3360 return weight;
3361}
3362
3363/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3364/// vector. If it is invalid, don't add anything to Ops.
3366 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3367 SelectionDAG &DAG) const {
3368 SDValue Result;
3369
3370 // Only support length 1 constraints for now.
3371 if (Constraint.size() > 1)
3372 return;
3373
3374 char ConstraintLetter = Constraint[0];
3375 switch (ConstraintLetter) {
3376 default: break;
3377 case 'I':
3379 if (isInt<13>(C->getSExtValue())) {
3380 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3381 Op.getValueType());
3382 break;
3383 }
3384 return;
3385 }
3386 }
3387
3388 if (Result.getNode()) {
3389 Ops.push_back(Result);
3390 return;
3391 }
3393}
3394
3395std::pair<unsigned, const TargetRegisterClass *>
3397 StringRef Constraint,
3398 MVT VT) const {
3399 if (Constraint.empty())
3400 return std::make_pair(0U, nullptr);
3401
3402 if (Constraint.size() == 1) {
3403 switch (Constraint[0]) {
3404 case 'r':
3405 if (VT == MVT::v2i32)
3406 return std::make_pair(0U, &SP::IntPairRegClass);
3407 else if (Subtarget->is64Bit())
3408 return std::make_pair(0U, &SP::I64RegsRegClass);
3409 else
3410 return std::make_pair(0U, &SP::IntRegsRegClass);
3411 case 'f':
3412 if (VT == MVT::f32 || VT == MVT::i32)
3413 return std::make_pair(0U, &SP::FPRegsRegClass);
3414 else if (VT == MVT::f64 || VT == MVT::i64)
3415 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3416 else if (VT == MVT::f128)
3417 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3418 // This will generate an error message
3419 return std::make_pair(0U, nullptr);
3420 case 'e':
3421 if (VT == MVT::f32 || VT == MVT::i32)
3422 return std::make_pair(0U, &SP::FPRegsRegClass);
3423 else if (VT == MVT::f64 || VT == MVT::i64 )
3424 return std::make_pair(0U, &SP::DFPRegsRegClass);
3425 else if (VT == MVT::f128)
3426 return std::make_pair(0U, &SP::QFPRegsRegClass);
3427 // This will generate an error message
3428 return std::make_pair(0U, nullptr);
3429 }
3430 }
3431
3432 if (Constraint.front() != '{')
3433 return std::make_pair(0U, nullptr);
3434
3435 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3436 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3437 if (RegName.empty())
3438 return std::make_pair(0U, nullptr);
3439
3440 unsigned long long RegNo;
3441 // Handle numbered register aliases.
3442 if (RegName[0] == 'r' &&
3443 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3444 // r0-r7 -> g0-g7
3445 // r8-r15 -> o0-o7
3446 // r16-r23 -> l0-l7
3447 // r24-r31 -> i0-i7
3448 if (RegNo > 31)
3449 return std::make_pair(0U, nullptr);
3450 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3451 char RegType = RegTypes[RegNo / 8];
3452 char RegIndex = '0' + (RegNo % 8);
3453 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3454 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3455 }
3456
3457 // Rewrite the fN constraint according to the value type if needed.
3458 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3459 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3460 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3462 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3463 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3465 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3466 } else {
3467 return std::make_pair(0U, nullptr);
3468 }
3469 }
3470
3471 auto ResultPair =
3473 if (!ResultPair.second)
3474 return std::make_pair(0U, nullptr);
3475
3476 // Force the use of I64Regs over IntRegs for 64-bit values.
3477 if (Subtarget->is64Bit() && VT == MVT::i64) {
3478 assert(ResultPair.second == &SP::IntRegsRegClass &&
3479 "Unexpected register class");
3480 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3481 }
3482
3483 return ResultPair;
3484}
3485
3486bool
3488 // The Sparc target isn't yet aware of offsets.
3489 return false;
3490}
3491
3494 SelectionDAG &DAG) const {
3495
3496 SDLoc dl(N);
3497
3498 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3499
3500 switch (N->getOpcode()) {
3501 default:
3502 llvm_unreachable("Do not know how to custom type legalize this operation!");
3503
3504 case ISD::FP_TO_SINT:
3505 case ISD::FP_TO_UINT:
3506 // Custom lower only if it involves f128 or i64.
3507 if (N->getOperand(0).getValueType() != MVT::f128
3508 || N->getValueType(0) != MVT::i64)
3509 return;
3510 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3511 ? RTLIB::FPTOSINT_F128_I64
3512 : RTLIB::FPTOUINT_F128_I64);
3513
3514 Results.push_back(LowerF128Op(SDValue(N, 0), DAG, libCall, 1));
3515 return;
3516 case ISD::READCYCLECOUNTER: {
3517 assert(Subtarget->hasLeonCycleCounter());
3518 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3519 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3520 SDValue Ops[] = { Lo, Hi };
3521 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3522 Results.push_back(Pair);
3523 Results.push_back(N->getOperand(0));
3524 return;
3525 }
3526 case ISD::SINT_TO_FP:
3527 case ISD::UINT_TO_FP:
3528 // Custom lower only if it involves f128 or i64.
3529 if (N->getValueType(0) != MVT::f128
3530 || N->getOperand(0).getValueType() != MVT::i64)
3531 return;
3532
3533 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3534 ? RTLIB::SINTTOFP_I64_F128
3535 : RTLIB::UINTTOFP_I64_F128);
3536
3537 Results.push_back(LowerF128Op(SDValue(N, 0), DAG, libCall, 1));
3538 return;
3539 case ISD::LOAD: {
3541 // Custom handling only for i64: turn i64 load into a v2i32 load,
3542 // and a bitcast.
3543 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3544 return;
3545
3546 SDLoc dl(N);
3547 SDValue LoadRes = DAG.getExtLoad(
3548 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3549 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3550 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3551
3552 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3553 Results.push_back(Res);
3554 Results.push_back(LoadRes.getValue(1));
3555 return;
3556 }
3557 }
3558}
3559
3560// Override to enable LOAD_STACK_GUARD lowering on Linux.
3562 if (!Subtarget->getTargetTriple().isOSLinux())
3564 return true;
3565}
3566
3568 if (Subtarget->isVIS3())
3569 return VT == MVT::f32 || VT == MVT::f64;
3570 return false;
3571}
3572
3574 bool ForCodeSize) const {
3575 if (VT != MVT::f32 && VT != MVT::f64)
3576 return false;
3577 if (Subtarget->isVIS() && Imm.isZero())
3578 return true;
3579 if (Subtarget->isVIS3())
3580 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3581 Imm.getExactLog2Abs() == -1;
3582 return false;
3583}
3584
3585bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3586
3588 // We lack native cttz, however,
3589 // On 64-bit targets it is cheap to implement it in terms of popc.
3590 if (Subtarget->is64Bit() && Subtarget->usePopc())
3591 return true;
3592 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3593 return isCheapToSpeculateCtlz(Ty);
3594}
3595
3597 EVT VT) const {
3598 return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
3599}
3600
3602 SDNode *Node) const {
3603 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3604 // If the result is dead, replace it with %g0.
3605 if (!Node->hasAnyUseOfValue(0))
3606 MI.getOperand(0).setReg(SP::G0);
3607}
3608
3610 Instruction *Inst,
3611 AtomicOrdering Ord) const {
3612 bool HasStoreSemantics =
3614 if (HasStoreSemantics && isReleaseOrStronger(Ord))
3615 return Builder.CreateFence(AtomicOrdering::Release);
3616 return nullptr;
3617}
3618
3620 Instruction *Inst,
3621 AtomicOrdering Ord) const {
3622 // V8 loads already come with implicit acquire barrier so there's no need to
3623 // emit it again.
3624 bool HasLoadSemantics = isa<AtomicCmpXchgInst, AtomicRMWInst, LoadInst>(Inst);
3625 if (Subtarget->isV9() && HasLoadSemantics && isAcquireOrStronger(Ord))
3626 return Builder.CreateFence(AtomicOrdering::Acquire);
3627
3628 // SC plain stores would need a trailing full barrier.
3630 return Builder.CreateFence(Ord);
3631 return nullptr;
3632}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL, SDValue &RHS, SelectionDAG &DAG)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
static constexpr MCPhysReg SPReg
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
static unsigned toCallerWindow(unsigned Reg)
static SDValue LowerSTACKADDRESS(SDValue Op, SelectionDAG &DAG, const SparcSubtarget &Subtarget)
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC)
intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC rcond condition.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC)
FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC FCC condition.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI, const MachineFunction &MF)
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG)
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, const CallBase *Call)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG, unsigned opcode)
static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static void emitReservedArgRegCallError(const MachineFunction &MF)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
This file describes how to lower LLVM code to machine code.
static bool is64Bit(const char *name)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
LLVM Basic Block Representation.
Definition BasicBlock.h:62
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
bool isLittleEndian() const
Layout endianness...
Definition DataLayout.h:214
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
const Function & getFunction() const
Definition Function.h:164
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition Function.h:687
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getBaseAlign() const
Returns alignment and volatility of the memory access.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool useSoftFloat() const override
SDValue bitcastConstantFPToInt(ConstantFPSDNode *C, const SDLoc &DL, SelectionDAG &DAG) const
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool IsEligibleForTailCallOptimization(CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
computeKnownBitsForTargetNode - Determine which of the bits specified in Mask are known to be either ...
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue PerformBITCASTCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, RTLIB::Libcall LibFunc, unsigned numArgs) const
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool useLoadStackGuardNode(const Module &M) const override
Override to support customized stack guard loading.
AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerF128Compare(SDValue LHS, SDValue RHS, unsigned &SPCC, const SDLoc &DL, SelectionDAG &DAG) const
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
char back() const
back - Get the last character in the string.
Definition StringRef.h:155
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
char front() const
front - Get the first character in the string.
Definition StringRef.h:149
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
bool isFP128Ty() const
Return true if this is 'fp128'.
Definition Type.h:162
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
self_iterator getIterator()
Definition ilist_node.h:123
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:818
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:787
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ STACKADDRESS
STACKADDRESS - Represents the llvm.stackaddress intrinsic.
Definition ISDOpcodes.h:127
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:778
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:852
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ GlobalAddress
Definition ISDOpcodes.h:88
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:879
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:992
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ GlobalTLSAddress
Definition ISDOpcodes.h:89
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:843
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:786
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:795
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:233
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:703
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:764
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:849
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:810
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:887
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:977
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:925
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:738
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:958
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:855
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:832
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
@ FCC_ULE
Definition Sparc.h:74
@ FCC_UG
Definition Sparc.h:64
@ ICC_G
Definition Sparc.h:46
@ REG_LEZ
Definition Sparc.h:97
@ ICC_L
Definition Sparc.h:49
@ FCC_NE
Definition Sparc.h:68
@ ICC_CS
Definition Sparc.h:53
@ FCC_LG
Definition Sparc.h:67
@ ICC_LEU
Definition Sparc.h:51
@ FCC_LE
Definition Sparc.h:73
@ ICC_LE
Definition Sparc.h:47
@ FCC_U
Definition Sparc.h:62
@ ICC_GE
Definition Sparc.h:48
@ FCC_E
Definition Sparc.h:69
@ REG_LZ
Definition Sparc.h:98
@ FCC_L
Definition Sparc.h:65
@ ICC_GU
Definition Sparc.h:50
@ FCC_O
Definition Sparc.h:75
@ ICC_NE
Definition Sparc.h:44
@ FCC_UE
Definition Sparc.h:70
@ REG_NZ
Definition Sparc.h:99
@ ICC_E
Definition Sparc.h:45
@ FCC_GE
Definition Sparc.h:71
@ FCC_UGE
Definition Sparc.h:72
@ REG_Z
Definition Sparc.h:96
@ ICC_CC
Definition Sparc.h:52
@ REG_GEZ
Definition Sparc.h:101
@ FCC_G
Definition Sparc.h:63
@ FCC_UL
Definition Sparc.h:66
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isStrongerThanMonotonic(AtomicOrdering AO)
std::string utostr(uint64_t X, bool isNeg=false)
bool isReleaseOrStronger(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
bool isAcquireOrStronger(AtomicOrdering AO)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition ValueTypes.h:94
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:395
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:65
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition KnownBits.h:314
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})