LLVM 20.0.0git
SparcISelLowering.cpp
Go to the documentation of this file.
1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Sparc uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SparcISelLowering.h"
18#include "SparcRegisterInfo.h"
19#include "SparcTargetMachine.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/Module.h"
37using namespace llvm;
38
39
40//===----------------------------------------------------------------------===//
41// Calling Convention Implementation
42//===----------------------------------------------------------------------===//
43
44static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
45 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
46 ISD::ArgFlagsTy &ArgFlags, CCState &State)
47{
48 assert (ArgFlags.isSRet());
49
50 // Assign SRet argument.
51 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
52 0,
53 LocVT, LocInfo));
54 return true;
55}
56
57static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
58 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
59 ISD::ArgFlagsTy &ArgFlags, CCState &State)
60{
61 static const MCPhysReg RegList[] = {
62 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
63 };
64 // Try to get first reg.
65 if (Register Reg = State.AllocateReg(RegList)) {
66 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
67 } else {
68 // Assign whole thing in stack.
70 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
71 return true;
72 }
73
74 // Try to get second reg.
75 if (Register Reg = State.AllocateReg(RegList))
76 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
77 else
79 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
80 return true;
81}
82
83static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
84 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
85 ISD::ArgFlagsTy &ArgFlags, CCState &State)
86{
87 static const MCPhysReg RegList[] = {
88 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
89 };
90
91 // Try to get first reg.
92 if (Register Reg = State.AllocateReg(RegList))
93 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
94 else
95 return false;
96
97 // Try to get second reg.
98 if (Register Reg = State.AllocateReg(RegList))
99 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
100 else
101 return false;
102
103 return true;
104}
105
106// Allocate a full-sized argument for the 64-bit ABI.
107static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
108 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
109 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
110 assert((LocVT == MVT::f32 || LocVT == MVT::f128
111 || LocVT.getSizeInBits() == 64) &&
112 "Can't handle non-64 bits locations");
113
114 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
115 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
116 Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
117 unsigned Offset = State.AllocateStack(size, alignment);
118 unsigned Reg = 0;
119
120 if (LocVT == MVT::i64 && Offset < 6*8)
121 // Promote integers to %i0-%i5.
122 Reg = SP::I0 + Offset/8;
123 else if (LocVT == MVT::f64 && Offset < 16*8)
124 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
125 Reg = SP::D0 + Offset/8;
126 else if (LocVT == MVT::f32 && Offset < 16*8)
127 // Promote floats to %f1, %f3, ...
128 Reg = SP::F1 + Offset/4;
129 else if (LocVT == MVT::f128 && Offset < 16*8)
130 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
131 Reg = SP::Q0 + Offset/16;
132
133 // Promote to register when possible, otherwise use the stack slot.
134 if (Reg) {
135 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
136 return true;
137 }
138
139 // Bail out if this is a return CC and we run out of registers to place
140 // values into.
141 if (IsReturn)
142 return false;
143
144 // This argument goes on the stack in an 8-byte slot.
145 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
146 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
147 if (LocVT == MVT::f32)
148 Offset += 4;
149
150 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
151 return true;
152}
153
154// Allocate a half-sized argument for the 64-bit ABI.
155//
156// This is used when passing { float, int } structs by value in registers.
157static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
158 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
159 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
160 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
161 unsigned Offset = State.AllocateStack(4, Align(4));
162
163 if (LocVT == MVT::f32 && Offset < 16*8) {
164 // Promote floats to %f0-%f31.
165 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
166 LocVT, LocInfo));
167 return true;
168 }
169
170 if (LocVT == MVT::i32 && Offset < 6*8) {
171 // Promote integers to %i0-%i5, using half the register.
172 unsigned Reg = SP::I0 + Offset/8;
173 LocVT = MVT::i64;
174 LocInfo = CCValAssign::AExt;
175
176 // Set the Custom bit if this i32 goes in the high bits of a register.
177 if (Offset % 8 == 0)
178 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
179 LocVT, LocInfo));
180 else
181 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
182 return true;
183 }
184
185 // Bail out if this is a return CC and we run out of registers to place
186 // values into.
187 if (IsReturn)
188 return false;
189
190 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
191 return true;
192}
193
194static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
195 CCValAssign::LocInfo &LocInfo,
196 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
197 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
198 State);
199}
200
201static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
202 CCValAssign::LocInfo &LocInfo,
203 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
204 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
205 State);
206}
207
208static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
209 CCValAssign::LocInfo &LocInfo,
210 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
211 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
212 State);
213}
214
215static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
216 CCValAssign::LocInfo &LocInfo,
217 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
218 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
219 State);
220}
221
222#include "SparcGenCallingConv.inc"
223
224// The calling conventions in SparcCallingConv.td are described in terms of the
225// callee's register window. This function translates registers to the
226// corresponding caller window %o register.
227static unsigned toCallerWindow(unsigned Reg) {
228 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
229 "Unexpected enum");
230 if (Reg >= SP::I0 && Reg <= SP::I7)
231 return Reg - SP::I0 + SP::O0;
232 return Reg;
233}
234
236 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
237 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
238 const Type *RetTy) const {
240 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
241 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
242 : RetCC_Sparc32);
243}
244
247 bool IsVarArg,
249 const SmallVectorImpl<SDValue> &OutVals,
250 const SDLoc &DL, SelectionDAG &DAG) const {
251 if (Subtarget->is64Bit())
252 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
253 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
254}
255
258 bool IsVarArg,
260 const SmallVectorImpl<SDValue> &OutVals,
261 const SDLoc &DL, SelectionDAG &DAG) const {
263
264 // CCValAssign - represent the assignment of the return value to locations.
266
267 // CCState - Info about the registers and stack slot.
268 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
269 *DAG.getContext());
270
271 // Analyze return values.
272 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
273
274 SDValue Glue;
275 SmallVector<SDValue, 4> RetOps(1, Chain);
276 // Make room for the return address offset.
277 RetOps.push_back(SDValue());
278
279 // Copy the result values into the output registers.
280 for (unsigned i = 0, realRVLocIdx = 0;
281 i != RVLocs.size();
282 ++i, ++realRVLocIdx) {
283 CCValAssign &VA = RVLocs[i];
284 assert(VA.isRegLoc() && "Can only return in registers!");
285
286 SDValue Arg = OutVals[realRVLocIdx];
287
288 if (VA.needsCustom()) {
289 assert(VA.getLocVT() == MVT::v2i32);
290 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
291 // happen by default if this wasn't a legal type)
292
293 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
294 Arg,
296 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
297 Arg,
299
300 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
301 Glue = Chain.getValue(1);
302 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
303 VA = RVLocs[++i]; // skip ahead to next loc
304 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
305 Glue);
306 } else
307 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
308
309 // Guarantee that all emitted copies are stuck together with flags.
310 Glue = Chain.getValue(1);
311 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
312 }
313
314 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
315 // If the function returns a struct, copy the SRetReturnReg to I0
316 if (MF.getFunction().hasStructRetAttr()) {
318 Register Reg = SFI->getSRetReturnReg();
319 if (!Reg)
320 llvm_unreachable("sret virtual register not created in the entry block");
321 auto PtrVT = getPointerTy(DAG.getDataLayout());
322 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
323 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
324 Glue = Chain.getValue(1);
325 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
326 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
327 }
328
329 RetOps[0] = Chain; // Update chain.
330 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
331
332 // Add the glue if we have it.
333 if (Glue.getNode())
334 RetOps.push_back(Glue);
335
336 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
337}
338
339// Lower return values for the 64-bit ABI.
340// Return values are passed the exactly the same way as function arguments.
343 bool IsVarArg,
345 const SmallVectorImpl<SDValue> &OutVals,
346 const SDLoc &DL, SelectionDAG &DAG) const {
347 // CCValAssign - represent the assignment of the return value to locations.
349
350 // CCState - Info about the registers and stack slot.
351 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
352 *DAG.getContext());
353
354 // Analyze return values.
355 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
356
357 SDValue Glue;
358 SmallVector<SDValue, 4> RetOps(1, Chain);
359
360 // The second operand on the return instruction is the return address offset.
361 // The return address is always %i7+8 with the 64-bit ABI.
362 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
363
364 // Copy the result values into the output registers.
365 for (unsigned i = 0; i != RVLocs.size(); ++i) {
366 CCValAssign &VA = RVLocs[i];
367 assert(VA.isRegLoc() && "Can only return in registers!");
368 SDValue OutVal = OutVals[i];
369
370 // Integer return values must be sign or zero extended by the callee.
371 switch (VA.getLocInfo()) {
372 case CCValAssign::Full: break;
374 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
375 break;
377 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
378 break;
380 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
381 break;
382 default:
383 llvm_unreachable("Unknown loc info!");
384 }
385
386 // The custom bit on an i32 return value indicates that it should be passed
387 // in the high bits of the register.
388 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
389 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
390 DAG.getConstant(32, DL, MVT::i32));
391
392 // The next value may go in the low bits of the same register.
393 // Handle both at once.
394 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
395 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
396 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
397 // Skip the next value, it's already done.
398 ++i;
399 }
400 }
401
402 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
403
404 // Guarantee that all emitted copies are stuck together with flags.
405 Glue = Chain.getValue(1);
406 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
407 }
408
409 RetOps[0] = Chain; // Update chain.
410
411 // Add the flag if we have it.
412 if (Glue.getNode())
413 RetOps.push_back(Glue);
414
415 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
416}
417
419 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
420 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
421 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
422 if (Subtarget->is64Bit())
423 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
424 DL, DAG, InVals);
425 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
426 DL, DAG, InVals);
427}
428
429/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
430/// passed in either one or two GPRs, including FP values. TODO: we should
431/// pass FP values in FP registers for fastcc functions.
433 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
434 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
435 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
437 MachineRegisterInfo &RegInfo = MF.getRegInfo();
439
440 // Assign locations to all of the incoming arguments.
442 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
443 *DAG.getContext());
444 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
445
446 const unsigned StackOffset = 92;
447 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
448
449 unsigned InIdx = 0;
450 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
451 CCValAssign &VA = ArgLocs[i];
452
453 if (Ins[InIdx].Flags.isSRet()) {
454 if (InIdx != 0)
455 report_fatal_error("sparc only supports sret on the first parameter");
456 // Get SRet from [%fp+64].
457 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
458 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
459 SDValue Arg =
460 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
461 InVals.push_back(Arg);
462 continue;
463 }
464
465 if (VA.isRegLoc()) {
466 if (VA.needsCustom()) {
467 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
468
469 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
470 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
471 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
472
473 assert(i+1 < e);
474 CCValAssign &NextVA = ArgLocs[++i];
475
476 SDValue LoVal;
477 if (NextVA.isMemLoc()) {
478 int FrameIdx = MF.getFrameInfo().
479 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
480 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
481 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
482 } else {
483 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
484 &SP::IntRegsRegClass);
485 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
486 }
487
488 if (IsLittleEndian)
489 std::swap(LoVal, HiVal);
490
491 SDValue WholeValue =
492 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
493 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
494 InVals.push_back(WholeValue);
495 continue;
496 }
497 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
498 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
499 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
500 if (VA.getLocVT() == MVT::f32)
501 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
502 else if (VA.getLocVT() != MVT::i32) {
503 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
504 DAG.getValueType(VA.getLocVT()));
505 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
506 }
507 InVals.push_back(Arg);
508 continue;
509 }
510
511 assert(VA.isMemLoc());
512
513 unsigned Offset = VA.getLocMemOffset()+StackOffset;
514 auto PtrVT = getPointerTy(DAG.getDataLayout());
515
516 if (VA.needsCustom()) {
517 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
518 // If it is double-word aligned, just load.
519 if (Offset % 8 == 0) {
520 int FI = MF.getFrameInfo().CreateFixedObject(8,
521 Offset,
522 true);
523 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
524 SDValue Load =
525 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
526 InVals.push_back(Load);
527 continue;
528 }
529
530 int FI = MF.getFrameInfo().CreateFixedObject(4,
531 Offset,
532 true);
533 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
534 SDValue HiVal =
535 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
536 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
537 Offset+4,
538 true);
539 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
540
541 SDValue LoVal =
542 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
543
544 if (IsLittleEndian)
545 std::swap(LoVal, HiVal);
546
547 SDValue WholeValue =
548 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
549 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
550 InVals.push_back(WholeValue);
551 continue;
552 }
553
554 int FI = MF.getFrameInfo().CreateFixedObject(4,
555 Offset,
556 true);
557 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
558 SDValue Load ;
559 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
560 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
561 } else if (VA.getValVT() == MVT::f128) {
562 report_fatal_error("SPARCv8 does not handle f128 in calls; "
563 "pass indirectly");
564 } else {
565 // We shouldn't see any other value types here.
566 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
567 }
568 InVals.push_back(Load);
569 }
570
571 if (MF.getFunction().hasStructRetAttr()) {
572 // Copy the SRet Argument to SRetReturnReg.
574 Register Reg = SFI->getSRetReturnReg();
575 if (!Reg) {
576 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
577 SFI->setSRetReturnReg(Reg);
578 }
579 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
580 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
581 }
582
583 // Store remaining ArgRegs to the stack if this is a varargs function.
584 if (isVarArg) {
585 static const MCPhysReg ArgRegs[] = {
586 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
587 };
588 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
589 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
590 unsigned ArgOffset = CCInfo.getStackSize();
591 if (NumAllocated == 6)
592 ArgOffset += StackOffset;
593 else {
594 assert(!ArgOffset);
595 ArgOffset = 68+4*NumAllocated;
596 }
597
598 // Remember the vararg offset for the va_start implementation.
599 FuncInfo->setVarArgsFrameOffset(ArgOffset);
600
601 std::vector<SDValue> OutChains;
602
603 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
604 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
605 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
606 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
607
608 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
609 true);
610 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
611
612 OutChains.push_back(
613 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
614 ArgOffset += 4;
615 }
616
617 if (!OutChains.empty()) {
618 OutChains.push_back(Chain);
619 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
620 }
621 }
622
623 return Chain;
624}
625
626// Lower formal arguments for the 64 bit ABI.
628 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
629 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
630 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
632
633 // Analyze arguments according to CC_Sparc64.
635 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
636 *DAG.getContext());
637 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
638
639 // The argument array begins at %fp+BIAS+128, after the register save area.
640 const unsigned ArgArea = 128;
641
642 for (const CCValAssign &VA : ArgLocs) {
643 if (VA.isRegLoc()) {
644 // This argument is passed in a register.
645 // All integer register arguments are promoted by the caller to i64.
646
647 // Create a virtual register for the promoted live-in value.
648 Register VReg = MF.addLiveIn(VA.getLocReg(),
649 getRegClassFor(VA.getLocVT()));
650 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
651
652 // Get the high bits for i32 struct elements.
653 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
654 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
655 DAG.getConstant(32, DL, MVT::i32));
656
657 // The caller promoted the argument, so insert an Assert?ext SDNode so we
658 // won't promote the value again in this function.
659 switch (VA.getLocInfo()) {
661 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
662 DAG.getValueType(VA.getValVT()));
663 break;
665 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
666 DAG.getValueType(VA.getValVT()));
667 break;
668 default:
669 break;
670 }
671
672 // Truncate the register down to the argument type.
673 if (VA.isExtInLoc())
674 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
675
676 InVals.push_back(Arg);
677 continue;
678 }
679
680 // The registers are exhausted. This argument was passed on the stack.
681 assert(VA.isMemLoc());
682 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
683 // beginning of the arguments area at %fp+BIAS+128.
684 unsigned Offset = VA.getLocMemOffset() + ArgArea;
685 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
686 // Adjust offset for extended arguments, SPARC is big-endian.
687 // The caller will have written the full slot with extended bytes, but we
688 // prefer our own extending loads.
689 if (VA.isExtInLoc())
690 Offset += 8 - ValSize;
691 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
692 InVals.push_back(
693 DAG.getLoad(VA.getValVT(), DL, Chain,
696 }
697
698 if (!IsVarArg)
699 return Chain;
700
701 // This function takes variable arguments, some of which may have been passed
702 // in registers %i0-%i5. Variable floating point arguments are never passed
703 // in floating point registers. They go on %i0-%i5 or on the stack like
704 // integer arguments.
705 //
706 // The va_start intrinsic needs to know the offset to the first variable
707 // argument.
708 unsigned ArgOffset = CCInfo.getStackSize();
710 // Skip the 128 bytes of register save area.
711 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
712 Subtarget->getStackPointerBias());
713
714 // Save the variable arguments that were passed in registers.
715 // The caller is required to reserve stack space for 6 arguments regardless
716 // of how many arguments were actually passed.
717 SmallVector<SDValue, 8> OutChains;
718 for (; ArgOffset < 6*8; ArgOffset += 8) {
719 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
720 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
721 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
722 auto PtrVT = getPointerTy(MF.getDataLayout());
723 OutChains.push_back(
724 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
726 }
727
728 if (!OutChains.empty())
729 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
730
731 return Chain;
732}
733
734// Check whether any of the argument registers are reserved
736 const MachineFunction &MF) {
737 // The register window design means that outgoing parameters at O*
738 // will appear in the callee as I*.
739 // Be conservative and check both sides of the register names.
740 bool Outgoing =
741 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
742 return TRI->isReservedReg(MF, r);
743 });
744 bool Incoming =
745 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
746 return TRI->isReservedReg(MF, r);
747 });
748 return Outgoing || Incoming;
749}
750
752 const Function &F = MF.getFunction();
753 F.getContext().diagnose(DiagnosticInfoUnsupported{
754 F, ("SPARC doesn't support"
755 " function calls if any of the argument registers is reserved.")});
756}
757
760 SmallVectorImpl<SDValue> &InVals) const {
761 if (Subtarget->is64Bit())
762 return LowerCall_64(CLI, InVals);
763 return LowerCall_32(CLI, InVals);
764}
765
766static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
767 const CallBase *Call) {
768 if (Call)
769 return Call->hasFnAttr(Attribute::ReturnsTwice);
770
771 const Function *CalleeFn = nullptr;
772 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
773 CalleeFn = dyn_cast<Function>(G->getGlobal());
774 } else if (ExternalSymbolSDNode *E =
775 dyn_cast<ExternalSymbolSDNode>(Callee)) {
776 const Function &Fn = DAG.getMachineFunction().getFunction();
777 const Module *M = Fn.getParent();
778 const char *CalleeName = E->getSymbol();
779 CalleeFn = M->getFunction(CalleeName);
780 }
781
782 if (!CalleeFn)
783 return false;
784 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
785}
786
787/// IsEligibleForTailCallOptimization - Check whether the call is eligible
788/// for tail call optimization.
790 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
791
792 auto &Outs = CLI.Outs;
793 auto &Caller = MF.getFunction();
794
795 // Do not tail call opt functions with "disable-tail-calls" attribute.
796 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
797 return false;
798
799 // Do not tail call opt if the stack is used to pass parameters.
800 // 64-bit targets have a slightly higher limit since the ABI requires
801 // to allocate some space even when all the parameters fit inside registers.
802 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
803 if (CCInfo.getStackSize() > StackSizeLimit)
804 return false;
805
806 // Do not tail call opt if either the callee or caller returns
807 // a struct and the other does not.
808 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
809 return false;
810
811 // Byval parameters hand the function a pointer directly into the stack area
812 // we want to reuse during a tail call.
813 for (auto &Arg : Outs)
814 if (Arg.Flags.isByVal())
815 return false;
816
817 return true;
818}
819
820// Lower a call for the 32-bit ABI.
823 SmallVectorImpl<SDValue> &InVals) const {
824 SelectionDAG &DAG = CLI.DAG;
825 SDLoc &dl = CLI.DL;
827 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
829 SDValue Chain = CLI.Chain;
830 SDValue Callee = CLI.Callee;
831 bool &isTailCall = CLI.IsTailCall;
832 CallingConv::ID CallConv = CLI.CallConv;
833 bool isVarArg = CLI.IsVarArg;
835
836 // Analyze operands of the call, assigning locations to each operand.
838 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
839 *DAG.getContext());
840 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
841
842 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
843 CCInfo, CLI, DAG.getMachineFunction());
844
845 // Get the size of the outgoing arguments stack space requirement.
846 unsigned ArgsSize = CCInfo.getStackSize();
847
848 // Keep stack frames 8-byte aligned.
849 ArgsSize = (ArgsSize+7) & ~7;
850
852
853 // Create local copies for byval args.
854 SmallVector<SDValue, 8> ByValArgs;
855 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
856 ISD::ArgFlagsTy Flags = Outs[i].Flags;
857 if (!Flags.isByVal())
858 continue;
859
860 SDValue Arg = OutVals[i];
861 unsigned Size = Flags.getByValSize();
862 Align Alignment = Flags.getNonZeroByValAlign();
863
864 if (Size > 0U) {
865 int FI = MFI.CreateStackObject(Size, Alignment, false);
866 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
867 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
868
869 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
870 false, // isVolatile,
871 (Size <= 32), // AlwaysInline if size <= 32,
872 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
874 ByValArgs.push_back(FIPtr);
875 }
876 else {
877 SDValue nullVal;
878 ByValArgs.push_back(nullVal);
879 }
880 }
881
882 assert(!isTailCall || ArgsSize == 0);
883
884 if (!isTailCall)
885 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
886
888 SmallVector<SDValue, 8> MemOpChains;
889
890 const unsigned StackOffset = 92;
891 bool hasStructRetAttr = false;
892 unsigned SRetArgSize = 0;
893 // Walk the register/memloc assignments, inserting copies/loads.
894 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
895 i != e;
896 ++i, ++realArgIdx) {
897 CCValAssign &VA = ArgLocs[i];
898 SDValue Arg = OutVals[realArgIdx];
899
900 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
901
902 // Use local copy if it is a byval arg.
903 if (Flags.isByVal()) {
904 Arg = ByValArgs[byvalArgIdx++];
905 if (!Arg) {
906 continue;
907 }
908 }
909
910 // Promote the value if needed.
911 switch (VA.getLocInfo()) {
912 default: llvm_unreachable("Unknown loc info!");
913 case CCValAssign::Full: break;
915 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
916 break;
918 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
919 break;
921 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
922 break;
924 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
925 break;
926 }
927
928 if (Flags.isSRet()) {
929 assert(VA.needsCustom());
930
931 if (isTailCall)
932 continue;
933
934 // store SRet argument in %sp+64
935 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
936 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
937 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
938 MemOpChains.push_back(
939 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
940 hasStructRetAttr = true;
941 // sret only allowed on first argument
942 assert(Outs[realArgIdx].OrigArgIndex == 0);
943 SRetArgSize =
944 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
945 continue;
946 }
947
948 if (VA.needsCustom()) {
949 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
950
951 if (VA.isMemLoc()) {
952 unsigned Offset = VA.getLocMemOffset() + StackOffset;
953 // if it is double-word aligned, just store.
954 if (Offset % 8 == 0) {
955 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
956 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
957 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
958 MemOpChains.push_back(
959 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
960 continue;
961 }
962 }
963
964 if (VA.getLocVT() == MVT::f64) {
965 // Move from the float value from float registers into the
966 // integer registers.
967 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
968 Arg = bitcastConstantFPToInt(C, dl, DAG);
969 else
970 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
971 }
972
973 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
974 Arg,
975 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
976 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
977 Arg,
978 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
979
980 if (VA.isRegLoc()) {
981 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
982 assert(i+1 != e);
983 CCValAssign &NextVA = ArgLocs[++i];
984 if (NextVA.isRegLoc()) {
985 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
986 } else {
987 // Store the second part in stack.
988 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
989 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
990 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
991 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
992 MemOpChains.push_back(
993 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
994 }
995 } else {
996 unsigned Offset = VA.getLocMemOffset() + StackOffset;
997 // Store the first part.
998 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
999 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1000 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1001 MemOpChains.push_back(
1002 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1003 // Store the second part.
1004 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1005 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1006 MemOpChains.push_back(
1007 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1008 }
1009 continue;
1010 }
1011
1012 // Arguments that can be passed on register must be kept at
1013 // RegsToPass vector
1014 if (VA.isRegLoc()) {
1015 if (VA.getLocVT() != MVT::f32) {
1016 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1017 continue;
1018 }
1019 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1020 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1021 continue;
1022 }
1023
1024 assert(VA.isMemLoc());
1025
1026 // Create a store off the stack pointer for this argument.
1027 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1029 dl);
1030 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1031 MemOpChains.push_back(
1032 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1033 }
1034
1035
  // Emit all stores, make sure they occur before any copies into physregs.
1037 if (!MemOpChains.empty())
1038 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1039
1040 // Build a sequence of copy-to-reg nodes chained together with token
1041 // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
1043 // stuck together.
1044 SDValue InGlue;
1045 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1046 Register Reg = RegsToPass[i].first;
1047 if (!isTailCall)
1048 Reg = toCallerWindow(Reg);
1049 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
1050 InGlue = Chain.getValue(1);
1051 }
1052
1053 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1054
1055 // If the callee is a GlobalAddress node (quite common, every direct call is)
1056 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1057 // Likewise ExternalSymbol -> TargetExternalSymbol.
1060 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1061 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
1062 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1063 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
1064
1065 // Returns a chain & a flag for retval copy to use
1066 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1068 Ops.push_back(Chain);
1069 Ops.push_back(Callee);
1070 if (hasStructRetAttr)
1071 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1072 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1073 Register Reg = RegsToPass[i].first;
1074 if (!isTailCall)
1075 Reg = toCallerWindow(Reg);
1076 Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
1077 }
1078
1079 // Add a register mask operand representing the call-preserved registers.
1080 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1081 const uint32_t *Mask =
1082 ((hasReturnsTwice)
1083 ? TRI->getRTCallPreservedMask(CallConv)
1084 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1085
1086 if (isAnyArgRegReserved(TRI, MF))
1088
1089 assert(Mask && "Missing call preserved mask for calling convention");
1090 Ops.push_back(DAG.getRegisterMask(Mask));
1091
1092 if (InGlue.getNode())
1093 Ops.push_back(InGlue);
1094
1095 if (isTailCall) {
1097 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1098 }
1099
1100 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1101 InGlue = Chain.getValue(1);
1102
1103 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1104 InGlue = Chain.getValue(1);
1105
1106 // Assign locations to each value returned by this call.
1108 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1109 *DAG.getContext());
1110
1111 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1112
1113 // Copy all of the result registers out of their specified physreg.
1114 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1115 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1116 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1117 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1119 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1120 Chain = Lo.getValue(1);
1121 InGlue = Lo.getValue(2);
1122 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1123 DAG.getConstant(0, dl, MVT::i32));
1125 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1126 Chain = Hi.getValue(1);
1127 InGlue = Hi.getValue(2);
1128 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1129 DAG.getConstant(1, dl, MVT::i32));
1130 InVals.push_back(Vec);
1131 } else {
1132 Chain =
1133 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1134 RVLocs[i].getValVT(), InGlue)
1135 .getValue(1);
1136 InGlue = Chain.getValue(2);
1137 InVals.push_back(Chain.getValue(0));
1138 }
1139 }
1140
1141 return Chain;
1142}
1143
1144// FIXME? Maybe this could be a TableGen attribute on some registers and
1145// this table could be generated automatically from RegInfo.
1147 const MachineFunction &MF) const {
1149 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1150 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1151 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1152 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1153 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1154 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1155 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1156 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1157 .Default(0);
1158
1159 // If we're directly referencing register names
1160 // (e.g in GCC C extension `register int r asm("g1");`),
1161 // make sure that said register is in the reserve list.
1162 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1163 if (!TRI->isReservedReg(MF, Reg))
1164 Reg = 0;
1165
1166 if (Reg)
1167 return Reg;
1168
1169 report_fatal_error("Invalid register name global variable");
1170}
1171
1172// Fixup floating point arguments in the ... part of a varargs call.
1173//
1174// The SPARC v9 ABI requires that floating point arguments are treated the same
1175// as integers when calling a varargs function. This does not apply to the
1176// fixed arguments that are part of the function's prototype.
1177//
1178// This function post-processes a CCValAssign array created by
1179// AnalyzeCallOperands().
1182 for (CCValAssign &VA : ArgLocs) {
1183 MVT ValTy = VA.getLocVT();
1184 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1185 // varargs functions.
1186 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1187 continue;
1188 // The fixed arguments to a varargs function still go in FP registers.
1189 if (Outs[VA.getValNo()].IsFixed)
1190 continue;
1191
1192 // This floating point argument should be reassigned.
1193 // Determine the offset into the argument array.
1194 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1195 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1196 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1197 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1198
1199 if (Offset < 6*8) {
1200 // This argument should go in %i0-%i5.
1201 unsigned IReg = SP::I0 + Offset/8;
1202 if (ValTy == MVT::f64)
1203 // Full register, just bitconvert into i64.
1204 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1206 else {
1207 assert(ValTy == MVT::f128 && "Unexpected type!");
1208 // Full register, just bitconvert into i128 -- We will lower this into
1209 // two i64s in LowerCall_64.
1210 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1211 MVT::i128, CCValAssign::BCvt);
1212 }
1213 } else {
1214 // This needs to go to memory, we're out of integer registers.
1215 VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
1216 VA.getLocVT(), VA.getLocInfo());
1217 }
1218 }
1219}
1220
1221// Lower a call for the 64-bit ABI.
1222SDValue
1224 SmallVectorImpl<SDValue> &InVals) const {
1225 SelectionDAG &DAG = CLI.DAG;
1226 SDLoc DL = CLI.DL;
1227 SDValue Chain = CLI.Chain;
1228 auto PtrVT = getPointerTy(DAG.getDataLayout());
1230
1231 // Analyze operands of the call, assigning locations to each operand.
1233 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1234 *DAG.getContext());
1235 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1236
1238 CCInfo, CLI, DAG.getMachineFunction());
1239
1240 // Get the size of the outgoing arguments stack space requirement.
1241 // The stack offset computed by CC_Sparc64 includes all arguments.
1242 // Called functions expect 6 argument words to exist in the stack frame, used
1243 // or not.
1244 unsigned StackReserved = 6 * 8u;
1245 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1246
1247 // Keep stack frames 16-byte aligned.
1248 ArgsSize = alignTo(ArgsSize, 16);
1249
1250 // Varargs calls require special treatment.
1251 if (CLI.IsVarArg)
1252 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1253
1254 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1255
1256 // Adjust the stack pointer to make room for the arguments.
1257 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1258 // with more than 6 arguments.
1259 if (!CLI.IsTailCall)
1260 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1261
1262 // Collect the set of registers to pass to the function and their values.
1263 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1264 // instruction.
1266
1267 // Collect chains from all the memory opeations that copy arguments to the
1268 // stack. They must follow the stack pointer adjustment above and precede the
1269 // call instruction itself.
1270 SmallVector<SDValue, 8> MemOpChains;
1271
1272 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1273 const CCValAssign &VA = ArgLocs[i];
1274 SDValue Arg = CLI.OutVals[i];
1275
1276 // Promote the value if needed.
1277 switch (VA.getLocInfo()) {
1278 default:
1279 llvm_unreachable("Unknown location info!");
1280 case CCValAssign::Full:
1281 break;
1282 case CCValAssign::SExt:
1283 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1284 break;
1285 case CCValAssign::ZExt:
1286 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1287 break;
1288 case CCValAssign::AExt:
1289 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1290 break;
1291 case CCValAssign::BCvt:
1292 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1293 // SPARC does not support i128 natively. Lower it into two i64, see below.
1294 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1295 || VA.getLocVT() != MVT::i128)
1296 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1297 break;
1298 }
1299
1300 if (VA.isRegLoc()) {
1301 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1302 && VA.getLocVT() == MVT::i128) {
1303 // Store and reload into the integer register reg and reg+1.
1304 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1305 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1306 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1307 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1308 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1309 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1310 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1311
1312 // Store to %sp+BIAS+128+Offset
1313 SDValue Store =
1314 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1315 // Load into Reg and Reg+1
1316 SDValue Hi64 =
1317 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1318 SDValue Lo64 =
1319 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1320
1321 Register HiReg = VA.getLocReg();
1322 Register LoReg = VA.getLocReg() + 1;
1323 if (!CLI.IsTailCall) {
1324 HiReg = toCallerWindow(HiReg);
1325 LoReg = toCallerWindow(LoReg);
1326 }
1327
1328 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1329 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1330 continue;
1331 }
1332
1333 // The custom bit on an i32 return value indicates that it should be
1334 // passed in the high bits of the register.
1335 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1336 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1337 DAG.getConstant(32, DL, MVT::i32));
1338
1339 // The next value may go in the low bits of the same register.
1340 // Handle both at once.
1341 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1342 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1343 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1344 CLI.OutVals[i+1]);
1345 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1346 // Skip the next value, it's already done.
1347 ++i;
1348 }
1349 }
1350
1351 Register Reg = VA.getLocReg();
1352 if (!CLI.IsTailCall)
1353 Reg = toCallerWindow(Reg);
1354 RegsToPass.push_back(std::make_pair(Reg, Arg));
1355 continue;
1356 }
1357
1358 assert(VA.isMemLoc());
1359
1360 // Create a store off the stack pointer for this argument.
1361 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1362 // The argument area starts at %fp+BIAS+128 in the callee frame,
1363 // %sp+BIAS+128 in ours.
1364 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1365 Subtarget->getStackPointerBias() +
1366 128, DL);
1367 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1368 MemOpChains.push_back(
1369 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1370 }
1371
1372 // Emit all stores, make sure they occur before the call.
1373 if (!MemOpChains.empty())
1374 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1375
1376 // Build a sequence of CopyToReg nodes glued together with token chain and
1377 // glue operands which copy the outgoing args into registers. The InGlue is
1378 // necessary since all emitted instructions must be stuck together in order
1379 // to pass the live physical registers.
1380 SDValue InGlue;
1381 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1382 Chain = DAG.getCopyToReg(Chain, DL,
1383 RegsToPass[i].first, RegsToPass[i].second, InGlue);
1384 InGlue = Chain.getValue(1);
1385 }
1386
1387 // If the callee is a GlobalAddress node (quite common, every direct call is)
1388 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1389 // Likewise ExternalSymbol -> TargetExternalSymbol.
1390 SDValue Callee = CLI.Callee;
1391 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1394 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1395 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1396 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1397 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1398
1399 // Build the operands for the call instruction itself.
1401 Ops.push_back(Chain);
1402 Ops.push_back(Callee);
1403 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1404 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1405 RegsToPass[i].second.getValueType()));
1406
1407 // Add a register mask operand representing the call-preserved registers.
1408 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1409 const uint32_t *Mask =
1410 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1411 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1412 CLI.CallConv));
1413
1414 if (isAnyArgRegReserved(TRI, MF))
1416
1417 assert(Mask && "Missing call preserved mask for calling convention");
1418 Ops.push_back(DAG.getRegisterMask(Mask));
1419
1420 // Make sure the CopyToReg nodes are glued to the call instruction which
1421 // consumes the registers.
1422 if (InGlue.getNode())
1423 Ops.push_back(InGlue);
1424
1425 // Now the call itself.
1426 if (CLI.IsTailCall) {
1428 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1429 }
1430 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1431 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1432 InGlue = Chain.getValue(1);
1433
1434 // Revert the stack pointer immediately after the call.
1435 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1436 InGlue = Chain.getValue(1);
1437
1438 // Now extract the return values. This is more or less the same as
1439 // LowerFormalArguments_64.
1440
1441 // Assign locations to each value returned by this call.
1443 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1444 *DAG.getContext());
1445
1446 // Set inreg flag manually for codegen generated library calls that
1447 // return float.
1448 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1449 CLI.Ins[0].Flags.setInReg();
1450
1451 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1452
1453 // Copy all of the result registers out of their specified physreg.
1454 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1455 CCValAssign &VA = RVLocs[i];
1456 assert(VA.isRegLoc() && "Can only return in registers!");
1457 unsigned Reg = toCallerWindow(VA.getLocReg());
1458
1459 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1460 // reside in the same register in the high and low bits. Reuse the
1461 // CopyFromReg previous node to avoid duplicate copies.
1462 SDValue RV;
1463 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1464 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1465 RV = Chain.getValue(0);
1466
1467 // But usually we'll create a new CopyFromReg for a different register.
1468 if (!RV.getNode()) {
1469 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1470 Chain = RV.getValue(1);
1471 InGlue = Chain.getValue(2);
1472 }
1473
1474 // Get the high bits for i32 struct elements.
1475 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1476 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1477 DAG.getConstant(32, DL, MVT::i32));
1478
1479 // The callee promoted the return value, so insert an Assert?ext SDNode so
1480 // we won't promote the value again in this function.
1481 switch (VA.getLocInfo()) {
1482 case CCValAssign::SExt:
1483 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1484 DAG.getValueType(VA.getValVT()));
1485 break;
1486 case CCValAssign::ZExt:
1487 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1488 DAG.getValueType(VA.getValVT()));
1489 break;
1490 default:
1491 break;
1492 }
1493
1494 // Truncate the register down to the return value type.
1495 if (VA.isExtInLoc())
1496 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1497
1498 InVals.push_back(RV);
1499 }
1500
1501 return Chain;
1502}
1503
1504//===----------------------------------------------------------------------===//
1505// TargetLowering Implementation
1506//===----------------------------------------------------------------------===//
1507
1509 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1510 AI->getType()->getPrimitiveSizeInBits() == 32)
1511 return AtomicExpansionKind::None; // Uses xchg instruction
1512
1514}
1515
1516/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1517/// rcond condition.
1519 switch (CC) {
1520 default:
1521 llvm_unreachable("Unknown/unsigned integer condition code!");
1522 case ISD::SETEQ:
1523 return SPCC::REG_Z;
1524 case ISD::SETNE:
1525 return SPCC::REG_NZ;
1526 case ISD::SETLT:
1527 return SPCC::REG_LZ;
1528 case ISD::SETGT:
1529 return SPCC::REG_GZ;
1530 case ISD::SETLE:
1531 return SPCC::REG_LEZ;
1532 case ISD::SETGE:
1533 return SPCC::REG_GEZ;
1534 }
1535}
1536
1537/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1538/// condition.
1540 switch (CC) {
1541 default: llvm_unreachable("Unknown integer condition code!");
1542 case ISD::SETEQ: return SPCC::ICC_E;
1543 case ISD::SETNE: return SPCC::ICC_NE;
1544 case ISD::SETLT: return SPCC::ICC_L;
1545 case ISD::SETGT: return SPCC::ICC_G;
1546 case ISD::SETLE: return SPCC::ICC_LE;
1547 case ISD::SETGE: return SPCC::ICC_GE;
1548 case ISD::SETULT: return SPCC::ICC_CS;
1549 case ISD::SETULE: return SPCC::ICC_LEU;
1550 case ISD::SETUGT: return SPCC::ICC_GU;
1551 case ISD::SETUGE: return SPCC::ICC_CC;
1552 }
1553}
1554
1555/// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1556/// FCC condition.
1558 switch (CC) {
1559 default: llvm_unreachable("Unknown fp condition code!");
1560 case ISD::SETEQ:
1561 case ISD::SETOEQ: return SPCC::FCC_E;
1562 case ISD::SETNE:
1563 case ISD::SETUNE: return SPCC::FCC_NE;
1564 case ISD::SETLT:
1565 case ISD::SETOLT: return SPCC::FCC_L;
1566 case ISD::SETGT:
1567 case ISD::SETOGT: return SPCC::FCC_G;
1568 case ISD::SETLE:
1569 case ISD::SETOLE: return SPCC::FCC_LE;
1570 case ISD::SETGE:
1571 case ISD::SETOGE: return SPCC::FCC_GE;
1572 case ISD::SETULT: return SPCC::FCC_UL;
1573 case ISD::SETULE: return SPCC::FCC_ULE;
1574 case ISD::SETUGT: return SPCC::FCC_UG;
1575 case ISD::SETUGE: return SPCC::FCC_UGE;
1576 case ISD::SETUO: return SPCC::FCC_U;
1577 case ISD::SETO: return SPCC::FCC_O;
1578 case ISD::SETONE: return SPCC::FCC_LG;
1579 case ISD::SETUEQ: return SPCC::FCC_UE;
1580 }
1581}
1582
// Constructor: configures register classes, operation legality
// (Legal/Expand/Custom), load/store extension behavior, atomics support, and
// soft-float / hard-quad runtime library names, all driven by the subtarget
// (V8 vs V9, 32- vs 64-bit, LEON variants).
// NOTE(review): this copy of the source is missing many lines — the blank
// gaps below (and the constructor's own opening declarator) correspond to
// dropped statements. Verify against upstream LLVM before relying on it.
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    // NOTE(review): the enclosing "for (MVT VT : ...)" loop header that
    // binds VT appears to be dropped in this copy.
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // Custom legalize GlobalAddress nodes into LO/HI parts.

  // Sparc doesn't have sext_inreg, replace them with shl/sra

  // Sparc has no REM or DIVREM operations.

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
  }

  // Custom expand fp<->sint

  // Custom Expand fp<->uint

  // Lower f16 conversion operations into library calls


  // Sparc has no select or setcc: expand to SELECT_CC.


  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);



  if (Subtarget->is64Bit()) {

                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ , MVT::i64, Expand);
    setOperationAction(ISD::CTLZ , MVT::i64, Expand);
    setOperationAction(ISD::ROTL , MVT::i64, Expand);
    setOperationAction(ISD::ROTR , MVT::i64, Expand);
  }

  // ATOMICs.
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9()) {
    // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
    // but it hasn't been implemented in the backend yet.
    if (Subtarget->is64Bit())
    else
  } else if (Subtarget->hasLeonCasa())
  else




  // Custom Lower Atomic LOAD/STORE

  if (Subtarget->is64Bit()) {
  }

  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
  }

  setOperationAction(ISD::FSIN , MVT::f128, Expand);
  setOperationAction(ISD::FCOS , MVT::f128, Expand);
  setOperationAction(ISD::FREM , MVT::f128, Expand);
  setOperationAction(ISD::FMA  , MVT::f128, Expand);
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTLZ , MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);
  setOperationAction(ISD::FPOW , MVT::f128, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);


  // Expands to [SU]MUL_LOHI.

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setLibcallName(RTLIB::SDIV_I32, ".div");

    setLibcallName(RTLIB::UDIV_I32, ".udiv");

    setLibcallName(RTLIB::SREM_I32, ".rem");
    setLibcallName(RTLIB::UREM_I32, ".urem");
  }

  if (Subtarget->is64Bit()) {

  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
  // VAARG needs to be lowered to not do unaligned accesses for doubles.
  setOperationAction(ISD::VAARG             , MVT::Other, Custom);

  setOperationAction(ISD::TRAP              , MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);


                     Subtarget->usePopc() ? Legal : Expand);

  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
    setOperationAction(ISD::LOAD, MVT::f128, Legal);
    setOperationAction(ISD::STORE, MVT::f128, Legal);
  } else {
    setOperationAction(ISD::LOAD, MVT::f128, Custom);
  }

  if (Subtarget->hasHardQuad()) {
    setOperationAction(ISD::FADD,  MVT::f128, Legal);
    setOperationAction(ISD::FSUB,  MVT::f128, Legal);
    setOperationAction(ISD::FMUL,  MVT::f128, Legal);
    setOperationAction(ISD::FDIV,  MVT::f128, Legal);
    setOperationAction(ISD::FSQRT, MVT::f128, Legal);
    if (Subtarget->isV9()) {
      setOperationAction(ISD::FNEG, MVT::f128, Legal);
      setOperationAction(ISD::FABS, MVT::f128, Legal);
    } else {
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
    }

    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FABS, MVT::f128, Custom);


    // Setup Runtime library names.
    if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128,  "_Qp_add");
      setLibcallName(RTLIB::SUB_F128,  "_Qp_sub");
      setLibcallName(RTLIB::MUL_F128,  "_Qp_mul");
      setLibcallName(RTLIB::DIV_F128,  "_Qp_div");
      setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
    } else if (!Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128,  "_Q_add");
      setLibcallName(RTLIB::SUB_F128,  "_Q_sub");
      setLibcallName(RTLIB::MUL_F128,  "_Q_mul");
      setLibcallName(RTLIB::DIV_F128,  "_Q_div");
      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
  }

  if (Subtarget->hasNoFMULS()) {
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())

  if (Subtarget->hasLeonCycleCounter())



}
1990
  // Whether this target lowers all FP through library calls (delegates to the
  // subtarget's soft-float setting).
  return Subtarget->useSoftFloat();
}
1994
1995const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1996 switch ((SPISD::NodeType)Opcode) {
1997 case SPISD::FIRST_NUMBER: break;
1998 case SPISD::CMPICC: return "SPISD::CMPICC";
1999 case SPISD::CMPFCC: return "SPISD::CMPFCC";
2000 case SPISD::CMPFCC_V9:
2001 return "SPISD::CMPFCC_V9";
2002 case SPISD::BRICC: return "SPISD::BRICC";
2003 case SPISD::BPICC:
2004 return "SPISD::BPICC";
2005 case SPISD::BPXCC:
2006 return "SPISD::BPXCC";
2007 case SPISD::BRFCC: return "SPISD::BRFCC";
2008 case SPISD::BRFCC_V9:
2009 return "SPISD::BRFCC_V9";
2010 case SPISD::BR_REG:
2011 return "SPISD::BR_REG";
2012 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
2013 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
2014 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
2015 case SPISD::SELECT_REG:
2016 return "SPISD::SELECT_REG";
2017 case SPISD::Hi: return "SPISD::Hi";
2018 case SPISD::Lo: return "SPISD::Lo";
2019 case SPISD::FTOI: return "SPISD::FTOI";
2020 case SPISD::ITOF: return "SPISD::ITOF";
2021 case SPISD::FTOX: return "SPISD::FTOX";
2022 case SPISD::XTOF: return "SPISD::XTOF";
2023 case SPISD::CALL: return "SPISD::CALL";
2024 case SPISD::RET_GLUE: return "SPISD::RET_GLUE";
2025 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
2026 case SPISD::FLUSHW: return "SPISD::FLUSHW";
2027 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
2028 case SPISD::TLS_LD: return "SPISD::TLS_LD";
2029 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
2030 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
2031 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
2032 }
2033 return nullptr;
2034}
2035
                                            EVT VT) const {
  // SPARC produces scalar setcc results as i32.
  if (!VT.isVector())
    return MVT::i32;
  // NOTE(review): the vector-typed return statement that belongs here was
  // dropped by this rendering of the file — restore from upstream before
  // editing this function.
}
2042
/// computeKnownBitsForTargetNode - Determine which bits of Op are known to be
/// zero or one for a Sparc target-specific node. Used by the DAG combiner.
/// (Previous comment referred to the old isMaskedValueZeroForTargetNode hook.)
                                         (const SDValue Op,
                                          KnownBits &Known,
                                          const APInt &DemandedElts,
                                          const SelectionDAG &DAG,
                                          unsigned Depth) const {
  KnownBits Known2;
  // Start from "nothing known"; only the select cases below refine this.
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    // A select may yield either operand, so a bit is known only if it is
    // known (and identical) in both possible results.
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);
    break;
  }
}
2068
2069// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2070// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2071static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2072 ISD::CondCode CC, unsigned &SPCC) {
2073 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2074 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2075 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2076 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2077 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2078 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2079 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2080 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2081 SDValue CMPCC = LHS.getOperand(3);
2082 SPCC = LHS.getConstantOperandVal(2);
2083 LHS = CMPCC.getOperand(0);
2084 RHS = CMPCC.getOperand(1);
2085 }
2086}
2087
// Convert to a target node and set target flags.
// Rewraps Op (a global address, constant pool entry, block address, or
// external symbol) as the corresponding Target* node carrying target flag TF.
                                                  SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  // Callers must only pass one of the four address node kinds above.
  llvm_unreachable("Unhandled address SDNode");
}
2113
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
                                   unsigned HiTF, unsigned LoTF,
                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // %hi(sym)/%lo(sym) pieces, reassembled with an ADD (sethi+or pattern).
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
2125
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
// NOTE(review): several link-bearing source lines were dropped by this
// rendering (operands of some getNode calls, the H44/L44/Hi/Lo declarations,
// and the MFI declaration); restore from upstream before editing.
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
  case CodeModel::Medium: {
    // abs44.
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
2186
                                                SelectionDAG &DAG) const {
  // Global addresses use the common PIC/absolute address builder above.
  return makeAddress(Op, DAG);
}
2191
                                               SelectionDAG &DAG) const {
  // Constant-pool addresses use the common PIC/absolute address builder.
  return makeAddress(Op, DAG);
}
2196
                                               SelectionDAG &DAG) const {
  // Block addresses use the common PIC/absolute address builder.
  return makeAddress(Op, DAG);
}
2201
// Lower a TLS global address for the four TLS models (general-dynamic,
// local-dynamic, initial-exec, local-exec), or defer to emulated TLS.
// NOTE(review): this rendering dropped several link-bearing lines (the
// TLSModel query, the VK_Sparc_TLS_* flag selections, Base/Offset
// declarations, and some call operands); restore from upstream before editing.
                                                  SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());


  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // Pick GD vs LD relocation flavors for the hi/lo pair, add, and call.
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
    unsigned callTF = ((model == TLSModel::GeneralDynamic)

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    // Emit an explicit call to __tls_get_addr with the GOT argument in %o0.
    SDValue Chain = DAG.getEntryNode();
    SDValue InGlue;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
    InGlue = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InGlue};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InGlue = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
    InGlue = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    // Local-dynamic: add the module-relative offset to the returned base.
    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
  }

  if (model == TLSModel::InitialExec) {
    // Initial-exec: load the TP-relative offset from the GOT.
    unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX


    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
  }

  // Local-exec: offset from the thread pointer (%g7) is known at link time.
  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
2305
// Append one argument for an f128 libcall to Args. f128 values are passed
// indirectly: the value is spilled to a 16-byte stack slot and the pointer is
// passed instead. Returns the (possibly updated) chain.
// NOTE(review): the MachineFrameInfo declaration line (MFI) was dropped by
// this rendering; restore from upstream before editing.
                                              ArgListTy &Args, SDValue Arg,
                                              const SDLoc &DL,
                                              SelectionDAG &DAG) const {
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, Align(8), false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         Align(8));

    Entry.Node = FIPtr;
    Entry.Ty = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}
2331
SDValue
// Lower an f128 operation to a call to the named support-library function
// (e.g. _Q_add / _Qp_add). f128 results are returned indirectly through a
// stack slot; other results come back directly from the call.
// NOTE(review): the MFI and CallLoweringInfo (CLI) declaration lines were
// dropped by this rendering; restore from upstream before editing.
                                         const char *LibFuncName,
                                         unsigned numArgs) const {

  ArgListTy Args;

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, Align(8), false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit()) {
      // 32-bit ABI passes the result slot as an sret pointer.
      Entry.IsSRet = true;
      Entry.IndirectType = RetTy;
    }
    Entry.IsReturned = false;
    Args.push_back(Entry);
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
}
2386
// Lower an f128 comparison to the soft-quad comparison library call, then
// translate the integer result into an integer-flags compare (CMPICC) and an
// updated SPCC integer condition code for the caller to branch/select on.
// NOTE(review): the RetTy and CallLoweringInfo (CLI) declaration lines were
// dropped by this rendering; restore from upstream before editing.
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  // Unordered-aware predicates use the 4-way _Q[p]_cmp (0:=eq 1:lt 2:gt 3:uo).
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result =  CallInfo.first;

  switch(SPCC) {
  default: {
    // Boolean-returning _Q_f* helpers: nonzero means the predicate holds.
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask   = DAG.getConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG :  {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U  :  {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  :  {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG :  {
    SDValue Mask   = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask   = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
2483
static SDValue
// Lower FP_EXTEND to f128: always a libcall (_Q[p]_stoq / _Q[p]_dtoq),
// selected by the source type (f32 or f64).
                       const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}
2499
static SDValue
// Lower FP_ROUND from f128 via libcall (_Q[p]_qtos / _Q[p]_qtod); rounds
// between f64 and f32 are already legal and are returned unchanged.
                      const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}
2517
// Lower FP_TO_SINT: f128 sources go through the soft-quad libcall unless the
// target has hard-quad support and a legal result type; otherwise the value
// is converted in an FP register (FTOI/FTOX) and bitcast to the integer type.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
2546
// Lower SINT_TO_FP: f128 results go through the soft-quad libcall unless the
// target has hard-quad support and a legal operand type; otherwise the int is
// bitcast into an FP register and converted there (ITOF/XTOF).
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
2574
// Lower FP_TO_UINT: only the f128 soft-quad case is handled here (via
// libcall); everything else is left for generic expansion (returns SDValue()).
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}
2595
// Lower UINT_TO_FP: only the f128 soft-quad case is handled here (via
// libcall); everything else is left for generic expansion (returns SDValue()).
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}
2614
// Lower BR_CC to a compare node plus a conditional-branch node, choosing
// BR_REG / BPICC / BPXCC / BRICC / BRFCC(_V9) by operand type and subtarget.
// NOTE(review): the signature line and part of the BR_REG eligibility
// condition (an isNullConstant/unsigned-CC check) were dropped by this
// rendering; restore from upstream before editing.
                          const SparcTargetLowering &TLI, bool hasHardQuad,
                          bool isV9, bool is64Bit) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized branch.
    if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
      return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
                         DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
                         LHS);

    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    if (isV9)
      // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
      Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
    else
      // Non-v9 targets don't have xcc.
      Opc = SPISD::BRICC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // Soft-quad f128 compare: libcall sets integer flags, branch on icc.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
    } else {
      unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2665
// Lower SELECT_CC to a compare plus a conditional-move style select node,
// choosing SELECT_REG / SELECT_ICC / SELECT_XCC / SELECT_FCC as appropriate.
// NOTE(review): the signature line and the SELECT_ICC-vs-SELECT_XCC ternary
// continuation were dropped by this rendering; restore from upstream before
// editing.
                              const SparcTargetLowering &TLI, bool hasHardQuad,
                              bool isV9, bool is64Bit) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized select.
    // All SELECT_CC between any two scalar integer types are eligible for
    // lowering to specialized instructions. Additionally, f32 and f64 types
    // are also eligible, but for f128 we can only use the specialized
    // instruction when we have hardquad.
    EVT ValType = TrueVal.getValueType();
    bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
                          ValType == MVT::f64 ||
                          (ValType == MVT::f128 && hasHardQuad);
    if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
        isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
      return DAG.getNode(
          SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
          DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);

    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // Soft-quad f128 compare sets integer flags, so select on icc.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2719
// Lower VASTART: store frame-pointer + VarArgsFrameOffset (the start of the
// on-stack varargs save area) into the va_list memory operand.
// NOTE(review): the MachineFunction/SparcMachineFunctionInfo declaration
// lines (FuncInfo) were dropped by this rendering; restore before editing.
                            const SparcTargetLowering &TLI) {
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
2739
// Lower VAARG: load the current va_list pointer, bump it past one argument,
// store it back, then load the argument value itself.
// NOTE(review): the signature line and the increment-size operand line were
// dropped by this rendering; restore from upstream before editing.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
}
2763
// Lower DYNAMIC_STACKALLOC: carve Size bytes out of the stack above the
// register spill area, honoring an optional over-alignment, and return both
// the usable pointer and the updated chain.
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Alignment = Op.getOperand(2);
  MaybeAlign MaybeAlignment =
      cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, we the
    // value we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  int64_t Bias = Subtarget->getStackPointerBias();

  // Debias and increment SP past the reserved spill area.
  // We need the SP to point to the first usable region before calculating
  // anything to prevent any of the pointers from becoming out of alignment when
  // we rebias the SP later on.
  SDValue StartOfUsableStack = DAG.getNode(
      ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
  SDValue AllocatedPtr =
      DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);

  // Round down to the requested alignment (mask with -align) if one was given.
  bool IsOveraligned = MaybeAlignment.has_value();
  SDValue AlignedPtr =
      IsOveraligned
          ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
                        DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
          : AllocatedPtr;

  // Now that we are done, restore the bias and reserved spill area.
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
                              DAG.getConstant(regSpillArea + Bias, dl, VT));
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
  SDValue Ops[2] = {AlignedPtr, Chain};
  return DAG.getMergeValues(Ops, dl);
}
2834
2835
  // Emit a FLUSHW node so all register-window contents are written to the
  // stack before we walk saved frames.
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
2842
// Walk `depth` saved frame pointers (flushing register windows first when
// needed) and return the requested frame address, rebiased on 64-bit.
// NOTE(review): the signature line and the MachineFrameInfo declaration were
// dropped by this rendering; restore from upstream before editing.
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // flush first to make sure the windowed registers' values are in stack
  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  // Offset of the saved FP within a frame (64-bit adds the stack bias).
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
2874
2875
// Lower FRAMEADDR: extract the constant depth operand and delegate to the
// frame-walking helper above.
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);

}
2884
// Lower RETURNADDR: depth 0 reads %i7 directly; deeper frames are found by
// walking saved frame pointers and loading the saved return address slot.
// NOTE(review): the signature line, the MachineFunction declaration, and the
// verifyReturnAddressArgumentIsConstant guard line were dropped by this
// rendering; restore from upstream before editing.
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

  // Saved return address lives at FP+120 (64-bit) or FP+60 (32-bit).
  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
2919
// Lower an f64 FNEG/FABS by splitting the value into its two f32
// register halves, applying the operation only to the half that holds
// the IEEE sign bit, and reassembling the f64 result from the halves.
// (On SPARC an f64 lives in an even/odd pair of f32 registers.)
2920static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2921 unsigned opcode) {
2922 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2923 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2924
2925 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2926 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2927 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2928
2929 // Note: in little-endian mode the two f32 halves are stored in the
2930 // registers in the opposite order, so the subreg with the sign
2931 // bit is the highest-numbered (odd) one, rather than the
2932 // lowest-numbered (even) one.
2933
// Extract the even and odd f32 subregisters of the f64 operand.
2934 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2935 SrcReg64);
2936 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2937 SrcReg64);
2938
// Apply the fneg/fabs only to the half that carries the sign bit:
// odd subreg on little-endian targets, even subreg otherwise.
2939 if (DAG.getDataLayout().isLittleEndian())
2940 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2941 else
2942 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2943
// Rebuild the f64: start from an IMPLICIT_DEF and insert both halves
// back into their subregister slots.
2944 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2945 dl, MVT::f64), 0);
2946 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2947 DstReg64, Hi32);
2948 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2949 DstReg64, Lo32);
2950 return DstReg64;
2951}
2952
2953// Lower a f128 load into two f64 loads.
2955{
2956 SDLoc dl(Op);
2957 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2958 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2959
2960 Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2961
2962 SDValue Hi64 =
2963 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2964 LdNode->getPointerInfo(), Alignment);
2965 EVT addrVT = LdNode->getBasePtr().getValueType();
2966 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2967 LdNode->getBasePtr(),
2968 DAG.getConstant(8, dl, addrVT));
2969 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2970 LdNode->getPointerInfo().getWithOffset(8),
2971 Alignment);
2972
2973 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2974 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2975
2976 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2977 dl, MVT::f128);
2978 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2979 MVT::f128,
2980 SDValue(InFP128, 0),
2981 Hi64,
2982 SubRegEven);
2983 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2984 MVT::f128,
2985 SDValue(InFP128, 0),
2986 Lo64,
2987 SubRegOdd);
2988 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2989 SDValue(Lo64.getNode(), 1) };
2990 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2991 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2992 return DAG.getMergeValues(Ops, dl);
2993}
2994
2996{
2997 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2998
2999 EVT MemVT = LdNode->getMemoryVT();
3000 if (MemVT == MVT::f128)
3001 return LowerF128Load(Op, DAG);
3002
3003 return Op;
3004}
3005
3006// Lower a f128 store into two f64 stores.
3008 SDLoc dl(Op);
3009 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
3010 assert(StNode->getOffset().isUndef() && "Unexpected node type");
3011
3012 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
3013 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
3014
3015 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3016 dl,
3017 MVT::f64,
3018 StNode->getValue(),
3019 SubRegEven);
3020 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3021 dl,
3022 MVT::f64,
3023 StNode->getValue(),
3024 SubRegOdd);
3025
3026 Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
3027
3028 SDValue OutChains[2];
3029 OutChains[0] =
3030 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
3031 StNode->getBasePtr(), StNode->getPointerInfo(),
3032 Alignment);
3033 EVT addrVT = StNode->getBasePtr().getValueType();
3034 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3035 StNode->getBasePtr(),
3036 DAG.getConstant(8, dl, addrVT));
3037 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3038 StNode->getPointerInfo().getWithOffset(8),
3039 Alignment);
3040 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3041}
3042
3044{
3045 SDLoc dl(Op);
3046 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3047
3048 EVT MemVT = St->getMemoryVT();
3049 if (MemVT == MVT::f128)
3050 return LowerF128Store(Op, DAG);
3051
3052 if (MemVT == MVT::i64) {
3053 // Custom handling for i64 stores: turn it into a bitcast and a
3054 // v2i32 store.
3055 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3056 SDValue Chain = DAG.getStore(
3057 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3058 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
3059 St->getAAInfo());
3060 return Chain;
3061 }
3062
3063 return SDValue();
3064}
3065
3067 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3068 && "invalid opcode");
3069
3070 SDLoc dl(Op);
3071
3072 if (Op.getValueType() == MVT::f64)
3073 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3074 if (Op.getValueType() != MVT::f128)
3075 return Op;
3076
3077 // Lower fabs/fneg on f128 to fabs/fneg on f64
3078 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3079 // (As with LowerF64Op, on little-endian, we need to negate the odd
3080 // subreg)
3081
3082 SDValue SrcReg128 = Op.getOperand(0);
3083 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3084 SrcReg128);
3085 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3086 SrcReg128);
3087
3088 if (DAG.getDataLayout().isLittleEndian()) {
3089 if (isV9)
3090 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3091 else
3092 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3093 } else {
3094 if (isV9)
3095 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3096 else
3097 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3098 }
3099
3100 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3101 dl, MVT::f128), 0);
3102 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3103 DstReg128, Hi64);
3104 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3105 DstReg128, Lo64);
3106 return DstReg128;
3107}
3108
3110 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3111 // Expand with a fence.
3112 return SDValue();
3113 }
3114
3115 // Monotonic load/stores are legal.
3116 return Op;
3117}
3118
3120 SelectionDAG &DAG) const {
3121 unsigned IntNo = Op.getConstantOperandVal(0);
3122 SDLoc dl(Op);
3123 switch (IntNo) {
3124 default: return SDValue(); // Don't custom lower most intrinsics.
3125 case Intrinsic::thread_pointer: {
3126 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3127 return DAG.getRegister(SP::G7, PtrVT);
3128 }
3129 }
3130}
3131
3134
3135 bool hasHardQuad = Subtarget->hasHardQuad();
3136 bool isV9 = Subtarget->isV9();
3137 bool is64Bit = Subtarget->is64Bit();
3138
3139 switch (Op.getOpcode()) {
3140 default: llvm_unreachable("Should not custom lower this!");
3141
3142 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3143 Subtarget);
3144 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3145 Subtarget);
3147 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3148 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3149 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3150 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3151 hasHardQuad);
3152 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3153 hasHardQuad);
3154 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3155 hasHardQuad);
3156 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3157 hasHardQuad);
3158 case ISD::BR_CC:
3159 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3160 case ISD::SELECT_CC:
3161 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3162 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3163 case ISD::VAARG: return LowerVAARG(Op, DAG);
3165 Subtarget);
3166
3167 case ISD::LOAD: return LowerLOAD(Op, DAG);
3168 case ISD::STORE: return LowerSTORE(Op, DAG);
3169 case ISD::FADD: return LowerF128Op(Op, DAG,
3170 getLibcallName(RTLIB::ADD_F128), 2);
3171 case ISD::FSUB: return LowerF128Op(Op, DAG,
3172 getLibcallName(RTLIB::SUB_F128), 2);
3173 case ISD::FMUL: return LowerF128Op(Op, DAG,
3174 getLibcallName(RTLIB::MUL_F128), 2);
3175 case ISD::FDIV: return LowerF128Op(Op, DAG,
3176 getLibcallName(RTLIB::DIV_F128), 2);
3177 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3178 getLibcallName(RTLIB::SQRT_F128),1);
3179 case ISD::FABS:
3180 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3181 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3182 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3183 case ISD::ATOMIC_LOAD:
3184 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3186 }
3187}
3188
3190 const SDLoc &DL,
3191 SelectionDAG &DAG) const {
3192 APInt V = C->getValueAPF().bitcastToAPInt();
3193 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3194 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3195 if (DAG.getDataLayout().isLittleEndian())
3196 std::swap(Lo, Hi);
3197 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3198}
3199
3201 DAGCombinerInfo &DCI) const {
3202 SDLoc dl(N);
3203 SDValue Src = N->getOperand(0);
3204
3205 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3206 Src.getSimpleValueType() == MVT::f64)
3207 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3208
3209 return SDValue();
3210}
3211
3213 DAGCombinerInfo &DCI) const {
3214 switch (N->getOpcode()) {
3215 default:
3216 break;
3217 case ISD::BITCAST:
3218 return PerformBITCASTCombine(N, DCI);
3219 }
3220 return SDValue();
3221}
3222
3225 MachineBasicBlock *BB) const {
3226 switch (MI.getOpcode()) {
3227 default: llvm_unreachable("Unknown SELECT_CC!");
3228 case SP::SELECT_CC_Int_ICC:
3229 case SP::SELECT_CC_FP_ICC:
3230 case SP::SELECT_CC_DFP_ICC:
3231 case SP::SELECT_CC_QFP_ICC:
3232 if (Subtarget->isV9())
3233 return expandSelectCC(MI, BB, SP::BPICC);
3234 return expandSelectCC(MI, BB, SP::BCOND);
3235 case SP::SELECT_CC_Int_XCC:
3236 case SP::SELECT_CC_FP_XCC:
3237 case SP::SELECT_CC_DFP_XCC:
3238 case SP::SELECT_CC_QFP_XCC:
3239 return expandSelectCC(MI, BB, SP::BPXCC);
3240 case SP::SELECT_CC_Int_FCC:
3241 case SP::SELECT_CC_FP_FCC:
3242 case SP::SELECT_CC_DFP_FCC:
3243 case SP::SELECT_CC_QFP_FCC:
3244 if (Subtarget->isV9())
3245 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3246 return expandSelectCC(MI, BB, SP::FBCOND);
3247 }
3248}
3249
3252 unsigned BROpcode) const {
3253 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3254 DebugLoc dl = MI.getDebugLoc();
3255 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3256
3257 // To "insert" a SELECT_CC instruction, we actually have to insert the
3258 // triangle control-flow pattern. The incoming instruction knows the
3259 // destination vreg to set, the condition code register to branch on, the
3260 // true/false values to select between, and the condition code for the branch.
3261 //
3262 // We produce the following control flow:
3263 // ThisMBB
3264 // | \
3265 // | IfFalseMBB
3266 // | /
3267 // SinkMBB
3268 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3270
3271 MachineBasicBlock *ThisMBB = BB;
3272 MachineFunction *F = BB->getParent();
3273 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3274 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3275 F->insert(It, IfFalseMBB);
3276 F->insert(It, SinkMBB);
3277
3278 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3279 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3280 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3281 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3282
3283 // Set the new successors for ThisMBB.
3284 ThisMBB->addSuccessor(IfFalseMBB);
3285 ThisMBB->addSuccessor(SinkMBB);
3286
3287 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3288 .addMBB(SinkMBB)
3289 .addImm(CC);
3290
3291 // IfFalseMBB just falls through to SinkMBB.
3292 IfFalseMBB->addSuccessor(SinkMBB);
3293
3294 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3295 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3296 MI.getOperand(0).getReg())
3297 .addReg(MI.getOperand(1).getReg())
3298 .addMBB(ThisMBB)
3299 .addReg(MI.getOperand(2).getReg())
3300 .addMBB(IfFalseMBB);
3301
3302 MI.eraseFromParent(); // The pseudo instruction is gone now.
3303 return SinkMBB;
3304}
3305
3306//===----------------------------------------------------------------------===//
3307// Sparc Inline Assembly Support
3308//===----------------------------------------------------------------------===//
3309
3310/// getConstraintType - Given a constraint letter, return the type of
3311/// constraint it is for this target.
3314 if (Constraint.size() == 1) {
3315 switch (Constraint[0]) {
3316 default: break;
3317 case 'r':
3318 case 'f':
3319 case 'e':
3320 return C_RegisterClass;
3321 case 'I': // SIMM13
3322 return C_Immediate;
3323 }
3324 }
3325
3326 return TargetLowering::getConstraintType(Constraint);
3327}
3328
3331 const char *constraint) const {
3333 Value *CallOperandVal = info.CallOperandVal;
3334 // If we don't have a value, we can't do a match,
3335 // but allow it at the lowest weight.
3336 if (!CallOperandVal)
3337 return CW_Default;
3338
3339 // Look at the constraint type.
3340 switch (*constraint) {
3341 default:
3343 break;
3344 case 'I': // SIMM13
3345 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3346 if (isInt<13>(C->getSExtValue()))
3347 weight = CW_Constant;
3348 }
3349 break;
3350 }
3351 return weight;
3352}
3353
3354/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3355/// vector. If it is invalid, don't add anything to Ops.
3357 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3358 SelectionDAG &DAG) const {
3359 SDValue Result;
3360
3361 // Only support length 1 constraints for now.
3362 if (Constraint.size() > 1)
3363 return;
3364
3365 char ConstraintLetter = Constraint[0];
3366 switch (ConstraintLetter) {
3367 default: break;
3368 case 'I':
3369 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3370 if (isInt<13>(C->getSExtValue())) {
3371 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3372 Op.getValueType());
3373 break;
3374 }
3375 return;
3376 }
3377 }
3378
3379 if (Result.getNode()) {
3380 Ops.push_back(Result);
3381 return;
3382 }
3383 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3384}
3385
3386std::pair<unsigned, const TargetRegisterClass *>
3388 StringRef Constraint,
3389 MVT VT) const {
3390 if (Constraint.empty())
3391 return std::make_pair(0U, nullptr);
3392
3393 if (Constraint.size() == 1) {
3394 switch (Constraint[0]) {
3395 case 'r':
3396 if (VT == MVT::v2i32)
3397 return std::make_pair(0U, &SP::IntPairRegClass);
3398 else if (Subtarget->is64Bit())
3399 return std::make_pair(0U, &SP::I64RegsRegClass);
3400 else
3401 return std::make_pair(0U, &SP::IntRegsRegClass);
3402 case 'f':
3403 if (VT == MVT::f32 || VT == MVT::i32)
3404 return std::make_pair(0U, &SP::FPRegsRegClass);
3405 else if (VT == MVT::f64 || VT == MVT::i64)
3406 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3407 else if (VT == MVT::f128)
3408 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3409 // This will generate an error message
3410 return std::make_pair(0U, nullptr);
3411 case 'e':
3412 if (VT == MVT::f32 || VT == MVT::i32)
3413 return std::make_pair(0U, &SP::FPRegsRegClass);
3414 else if (VT == MVT::f64 || VT == MVT::i64 )
3415 return std::make_pair(0U, &SP::DFPRegsRegClass);
3416 else if (VT == MVT::f128)
3417 return std::make_pair(0U, &SP::QFPRegsRegClass);
3418 // This will generate an error message
3419 return std::make_pair(0U, nullptr);
3420 }
3421 }
3422
3423 if (Constraint.front() != '{')
3424 return std::make_pair(0U, nullptr);
3425
3426 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3427 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3428 if (RegName.empty())
3429 return std::make_pair(0U, nullptr);
3430
3431 unsigned long long RegNo;
3432 // Handle numbered register aliases.
3433 if (RegName[0] == 'r' &&
3434 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3435 // r0-r7 -> g0-g7
3436 // r8-r15 -> o0-o7
3437 // r16-r23 -> l0-l7
3438 // r24-r31 -> i0-i7
3439 if (RegNo > 31)
3440 return std::make_pair(0U, nullptr);
3441 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3442 char RegType = RegTypes[RegNo / 8];
3443 char RegIndex = '0' + (RegNo % 8);
3444 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3445 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3446 }
3447
3448 // Rewrite the fN constraint according to the value type if needed.
3449 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3450 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3451 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3453 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3454 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3456 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3457 } else {
3458 return std::make_pair(0U, nullptr);
3459 }
3460 }
3461
3462 auto ResultPair =
3464 if (!ResultPair.second)
3465 return std::make_pair(0U, nullptr);
3466
3467 // Force the use of I64Regs over IntRegs for 64-bit values.
3468 if (Subtarget->is64Bit() && VT == MVT::i64) {
3469 assert(ResultPair.second == &SP::IntRegsRegClass &&
3470 "Unexpected register class");
3471 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3472 }
3473
3474 return ResultPair;
3475}
3476
3477bool
3479 // The Sparc target isn't yet aware of offsets.
3480 return false;
3481}
3482
3485 SelectionDAG &DAG) const {
3486
3487 SDLoc dl(N);
3488
3489 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3490
3491 switch (N->getOpcode()) {
3492 default:
3493 llvm_unreachable("Do not know how to custom type legalize this operation!");
3494
3495 case ISD::FP_TO_SINT:
3496 case ISD::FP_TO_UINT:
3497 // Custom lower only if it involves f128 or i64.
3498 if (N->getOperand(0).getValueType() != MVT::f128
3499 || N->getValueType(0) != MVT::i64)
3500 return;
3501 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3502 ? RTLIB::FPTOSINT_F128_I64
3503 : RTLIB::FPTOUINT_F128_I64);
3504
3505 Results.push_back(LowerF128Op(SDValue(N, 0),
3506 DAG,
3507 getLibcallName(libCall),
3508 1));
3509 return;
3510 case ISD::READCYCLECOUNTER: {
3511 assert(Subtarget->hasLeonCycleCounter());
3512 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3513 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3514 SDValue Ops[] = { Lo, Hi };
3515 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3516 Results.push_back(Pair);
3517 Results.push_back(N->getOperand(0));
3518 return;
3519 }
3520 case ISD::SINT_TO_FP:
3521 case ISD::UINT_TO_FP:
3522 // Custom lower only if it involves f128 or i64.
3523 if (N->getValueType(0) != MVT::f128
3524 || N->getOperand(0).getValueType() != MVT::i64)
3525 return;
3526
3527 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3528 ? RTLIB::SINTTOFP_I64_F128
3529 : RTLIB::UINTTOFP_I64_F128);
3530
3531 Results.push_back(LowerF128Op(SDValue(N, 0),
3532 DAG,
3533 getLibcallName(libCall),
3534 1));
3535 return;
3536 case ISD::LOAD: {
3537 LoadSDNode *Ld = cast<LoadSDNode>(N);
3538 // Custom handling only for i64: turn i64 load into a v2i32 load,
3539 // and a bitcast.
3540 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3541 return;
3542
3543 SDLoc dl(N);
3544 SDValue LoadRes = DAG.getExtLoad(
3545 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3546 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3547 Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3548 Ld->getAAInfo());
3549
3550 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3551 Results.push_back(Res);
3552 Results.push_back(LoadRes.getValue(1));
3553 return;
3554 }
3555 }
3556}
3557
3558// Override to enable LOAD_STACK_GUARD lowering on Linux.
3560 if (!Subtarget->isTargetLinux())
3562 return true;
3563}
3564
3565// Override to disable global variable loading on Linux.
3567 if (!Subtarget->isTargetLinux())
3569}
3570
3572 SDNode *Node) const {
3573 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3574 // If the result is dead, replace it with %g0.
3575 if (!Node->hasAnyUseOfValue(0))
3576 MI.getOperand(0).setReg(SP::G0);
3577}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define RegName(no)
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL, SDValue &RHS, SelectionDAG &DAG)
lazy value info
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
static constexpr Register SPReg
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
static unsigned toCallerWindow(unsigned Reg)
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC)
intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC rcond condition.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC)
FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC FCC condition.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI, const MachineFunction &MF)
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG)
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, const CallBase *Call)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG, unsigned opcode)
static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static void emitReservedArgRegCallError(const MachineFunction &MF)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool is64Bit(const char *name)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
BinOp getOperation() const
Definition: Instructions.h:805
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:197
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
A debug info location.
Definition: DebugLoc.h:33
Diagnostic information for unsupported feature in backend.
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:688
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:657
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:686
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:750
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:577
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:801
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:856
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:827
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:497
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:712
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:498
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:700
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:796
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:492
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
Definition: SelectionDAG.h:510
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:767
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:580
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
const SparcRegisterInfo * getRegisterInfo() const override
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
bool isTargetLinux() const
bool is64Bit() const
const SparcInstrInfo * getInstrInfo() const override
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool useSoftFloat() const override
SDValue bitcastConstantFPToInt(ConstantFPSDNode *C, const SDLoc &DL, SelectionDAG &DAG) const
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool IsEligibleForTailCallOptimization(CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
computeKnownBitsForTargetNode - Determine which of the bits specified in Mask are known to be either ...
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue PerformBITCASTCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool useLoadStackGuardNode(const Module &M) const override
Override to support customized stack guard loading.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI)
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerF128Compare(SDValue LHS, SDValue RHS, unsigned &SPCC, const SDLoc &DL, SelectionDAG &DAG) const
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
char back() const
back - Get the last character in the string.
Definition: StringRef.h:159
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
char front() const
front - Get the first character in the string.
Definition: StringRef.h:153
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:144
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isFP128Ty() const
Return true if this is 'fp128'.
Definition: Type.h:162
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1197
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1193
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:257
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:744
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1226
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1312
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1102
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:498
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:841
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1304
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:964
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:954
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:236
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1494
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1059
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:981
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1148
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1123
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:757
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1308
@ UNDEF
UNDEF - An undefined node.
Definition: ISDOpcodes.h:218
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1222
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:215
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:674
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:550
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1282
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:772
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1319
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1112
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:849
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:939
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:887
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:1253
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1279
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:190
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:539
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1333
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:920
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1217
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1141
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:794
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:508
@ AssertZext
Definition: ISDOpcodes.h:62
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition: ISDOpcodes.h:530
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1610
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
Definition: ISDOpcodes.h:1649
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
CondCodes
Definition: Sparc.h:42
@ FCC_ULE
Definition: Sparc.h:75
@ FCC_UG
Definition: Sparc.h:65
@ ICC_G
Definition: Sparc.h:47
@ REG_LEZ
Definition: Sparc.h:98
@ REG_GZ
Definition: Sparc.h:101
@ ICC_L
Definition: Sparc.h:50
@ FCC_NE
Definition: Sparc.h:69
@ ICC_CS
Definition: Sparc.h:54
@ FCC_LG
Definition: Sparc.h:68
@ ICC_LEU
Definition: Sparc.h:52
@ FCC_LE
Definition: Sparc.h:74
@ ICC_LE
Definition: Sparc.h:48
@ FCC_U
Definition: Sparc.h:63
@ ICC_GE
Definition: Sparc.h:49
@ FCC_E
Definition: Sparc.h:70
@ REG_LZ
Definition: Sparc.h:99
@ FCC_L
Definition: Sparc.h:66
@ ICC_GU
Definition: Sparc.h:51
@ FCC_O
Definition: Sparc.h:76
@ ICC_NE
Definition: Sparc.h:45
@ FCC_UE
Definition: Sparc.h:71
@ REG_NZ
Definition: Sparc.h:100
@ ICC_E
Definition: Sparc.h:46
@ FCC_GE
Definition: Sparc.h:72
@ FCC_UGE
Definition: Sparc.h:73
@ REG_Z
Definition: Sparc.h:97
@ ICC_CC
Definition: Sparc.h:53
@ REG_GEZ
Definition: Sparc.h:102
@ FCC_G
Definition: Sparc.h:64
@ FCC_UL
Definition: Sparc.h:67
@ GeneralDynamic
Definition: CodeGen.h:46
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isStrongerThanMonotonic(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
Definition: StringRef.cpp:488
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:94
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:376
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:210
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:73
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition: KnownBits.h:303
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})