// LLVM 20.0.0git
// MipsISelLowering.cpp
1//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that Mips uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MipsISelLowering.h"
18#include "MipsCCState.h"
19#include "MipsInstrInfo.h"
20#include "MipsMachineFunction.h"
21#include "MipsRegisterInfo.h"
22#include "MipsSubtarget.h"
23#include "MipsTargetMachine.h"
25#include "llvm/ADT/APFloat.h"
26#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/Statistic.h"
29#include "llvm/ADT/StringRef.h"
50#include "llvm/IR/CallingConv.h"
51#include "llvm/IR/Constants.h"
52#include "llvm/IR/DataLayout.h"
53#include "llvm/IR/DebugLoc.h"
55#include "llvm/IR/Function.h"
56#include "llvm/IR/GlobalValue.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/Type.h"
59#include "llvm/IR/Value.h"
60#include "llvm/MC/MCContext.h"
69#include <algorithm>
70#include <cassert>
71#include <cctype>
72#include <cstdint>
73#include <deque>
74#include <iterator>
75#include <utility>
76#include <vector>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "mips-lower"
81
82STATISTIC(NumTailCalls, "Number of tail calls");
83
// Command-line override (-mno-check-zero-division): when set, suppress the
// trap normally emitted after integer division to catch division by zero.
// Hidden option; defaults to false (checks enabled).
static cl::opt<bool>
NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
               cl::desc("MIPS: Don't trap on integer division by zero."),
               cl::init(false));
88
90
// The eight 64-bit double-precision FP registers D12_64..D19_64.
// NOTE(review): presumably the f64 argument-register sequence for the
// N32/N64 calling conventions — confirm against the CC handlers that
// reference this table (not visible in this chunk).
static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};
95
// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
100 EVT VT) const {
101 if (!VT.isVector())
102 return getRegisterType(Context, VT);
103
105 return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
106 : MVT::i64;
107 return getRegisterType(Context, VT.getVectorElementType());
108}
109
112 EVT VT) const {
113 if (VT.isVector()) {
115 return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
116 return VT.getVectorNumElements() *
118 }
119 return MipsTargetLowering::getNumRegisters(Context, VT);
120}
121
123 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
124 unsigned &NumIntermediates, MVT &RegisterVT) const {
125 if (VT.isPow2VectorType() && VT.getVectorElementType().isRound()) {
126 IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
127 RegisterVT = IntermediateVT.getSimpleVT();
128 NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
129 return NumIntermediates;
130 }
131 IntermediateVT = VT.getVectorElementType();
132 NumIntermediates = VT.getVectorNumElements();
133 RegisterVT = getRegisterType(Context, IntermediateVT);
134 return NumIntermediates * getNumRegisters(Context, IntermediateVT);
135}
136
140 return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
141}
142
143SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
144 SelectionDAG &DAG,
145 unsigned Flag) const {
146 return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
147}
148
149SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
150 SelectionDAG &DAG,
151 unsigned Flag) const {
152 return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
153}
154
155SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
156 SelectionDAG &DAG,
157 unsigned Flag) const {
158 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
159}
160
161SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
162 SelectionDAG &DAG,
163 unsigned Flag) const {
164 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
165}
166
167SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
168 SelectionDAG &DAG,
169 unsigned Flag) const {
170 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
171 N->getOffset(), Flag);
172}
173
// Return the printable name of a MipsISD opcode for debug dumps, or nullptr
// when the opcode has no entry in the table (FIRST_NUMBER deliberately falls
// through to the nullptr return).
const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER: break;
  case MipsISD::JmpLink: return "MipsISD::JmpLink";
  case MipsISD::TailCall: return "MipsISD::TailCall";
  case MipsISD::Highest: return "MipsISD::Highest";
  case MipsISD::Higher: return "MipsISD::Higher";
  case MipsISD::Hi: return "MipsISD::Hi";
  case MipsISD::Lo: return "MipsISD::Lo";
  case MipsISD::GotHi: return "MipsISD::GotHi";
  case MipsISD::TlsHi: return "MipsISD::TlsHi";
  case MipsISD::GPRel: return "MipsISD::GPRel";
  case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
  case MipsISD::Ret: return "MipsISD::Ret";
  case MipsISD::ERet: return "MipsISD::ERet";
  case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
  case MipsISD::FAbs: return "MipsISD::FAbs";
  case MipsISD::FMS: return "MipsISD::FMS";
  case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
  case MipsISD::FPCmp: return "MipsISD::FPCmp";
  case MipsISD::FSELECT: return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
  case MipsISD::MFHI: return "MipsISD::MFHI";
  case MipsISD::MFLO: return "MipsISD::MFLO";
  case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
  case MipsISD::Mult: return "MipsISD::Mult";
  case MipsISD::Multu: return "MipsISD::Multu";
  case MipsISD::MAdd: return "MipsISD::MAdd";
  case MipsISD::MAddu: return "MipsISD::MAddu";
  case MipsISD::MSub: return "MipsISD::MSub";
  case MipsISD::MSubu: return "MipsISD::MSubu";
  case MipsISD::DivRem: return "MipsISD::DivRem";
  case MipsISD::DivRemU: return "MipsISD::DivRemU";
  case MipsISD::DivRem16: return "MipsISD::DivRem16";
  case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper: return "MipsISD::Wrapper";
  case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
  case MipsISD::Sync: return "MipsISD::Sync";
  case MipsISD::Ext: return "MipsISD::Ext";
  case MipsISD::Ins: return "MipsISD::Ins";
  case MipsISD::CIns: return "MipsISD::CIns";
  case MipsISD::LWL: return "MipsISD::LWL";
  case MipsISD::LWR: return "MipsISD::LWR";
  case MipsISD::SWL: return "MipsISD::SWL";
  case MipsISD::SWR: return "MipsISD::SWR";
  case MipsISD::LDL: return "MipsISD::LDL";
  case MipsISD::LDR: return "MipsISD::LDR";
  case MipsISD::SDL: return "MipsISD::SDL";
  case MipsISD::SDR: return "MipsISD::SDR";
  case MipsISD::EXTP: return "MipsISD::EXTP";
  case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO: return "MipsISD::SHILO";
  case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DOUBLE_SELECT_I: return "MipsISD::DOUBLE_SELECT_I";
  case MipsISD::DOUBLE_SELECT_I64: return "MipsISD::DOUBLE_SELECT_I64";
  case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT: return "MipsISD::MULT";
  case MipsISD::MULTU: return "MipsISD::MULTU";
  case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ: return "MipsISD::VCEQ";
  case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR: return "MipsISD::VNOR";
  case MipsISD::VSHF: return "MipsISD::VSHF";
  case MipsISD::SHF: return "MipsISD::SHF";
  case MipsISD::ILVEV: return "MipsISD::ILVEV";
  case MipsISD::ILVOD: return "MipsISD::ILVOD";
  case MipsISD::ILVL: return "MipsISD::ILVL";
  case MipsISD::ILVR: return "MipsISD::ILVR";
  case MipsISD::PCKEV: return "MipsISD::PCKEV";
  case MipsISD::PCKOD: return "MipsISD::PCKOD";
  case MipsISD::INSVE: return "MipsISD::INSVE";
  }
  return nullptr;
}
295
297 const MipsSubtarget &STI)
298 : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
299 // Mips does not have i1 type, so use i32 for
300 // setcc operations results (slt, sgt, ...).
303 // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
304 // does. Integer booleans still use 0 and 1.
308
  // Load extended operations for i1 types must be promoted
310 for (MVT VT : MVT::integer_valuetypes()) {
314 }
315
316 // MIPS doesn't have extending float->double load/store. Set LoadExtAction
317 // for f32, f16
318 for (MVT VT : MVT::fp_valuetypes()) {
319 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
320 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
321 }
322
323 // Set LoadExtAction for f16 vectors to Expand
325 MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
326 if (F16VT.isValid())
328 }
329
330 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
331 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
332
333 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
334
335 // Used by legalize types to correctly generate the setcc result.
336 // Without this, every float setcc comes with a AND/OR with the result,
337 // we don't want this, since the fpcmp result goes to a flag register,
338 // which is used implicitly by brcond and select operations.
339 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
340
341 // Mips Custom Operations
359
360 // Lower fmin/fmax/fclass operations for MIPS R6.
361 if (Subtarget.hasMips32r6()) {
372 } else {
375 }
376
377 if (Subtarget.isGP64bit()) {
384 if (Subtarget.hasMips64r6()) {
387 } else {
390 }
395 }
396
397 if (!Subtarget.isGP64bit()) {
401 }
402
404 if (Subtarget.isGP64bit())
406
415
416 // Operations not directly supported by Mips.
430 if (Subtarget.hasCnMips()) {
433 } else {
436 }
443
444 if (!Subtarget.hasMips32r2())
446
447 if (!Subtarget.hasMips64r2())
449
466
467 // Lower f16 conversion operations into library calls
472
474
479
480 // Use the default for now
483
484 if (!Subtarget.isGP64bit()) {
487 }
488
489 if (!Subtarget.hasMips32r2()) {
492 }
493
494 // MIPS16 lacks MIPS32's clz and clo instructions.
497 if (!Subtarget.hasMips64())
499
500 if (!Subtarget.hasMips32r2())
502 if (!Subtarget.hasMips64r2())
504
506 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
507 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Legal);
508 setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Legal);
509 setTruncStoreAction(MVT::i64, MVT::i32, Legal);
510 } else if (Subtarget.isGP64bit()) {
511 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
512 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
513 setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
514 setTruncStoreAction(MVT::i64, MVT::i32, Custom);
515 }
516
517 setOperationAction(ISD::TRAP, MVT::Other, Legal);
518
521
522 if (Subtarget.isGP64bit())
524 else
526
528
529 // The arguments on the stack are defined in terms of 4-byte slots on O32
530 // and 8-byte slots on N32/N64.
532 : Align(4));
533
534 setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
535
537
538 isMicroMips = Subtarget.inMicroMipsMode();
539}
540
541const MipsTargetLowering *
543 const MipsSubtarget &STI) {
544 if (STI.inMips16Mode())
545 return createMips16TargetLowering(TM, STI);
546
547 return createMipsSETargetLowering(TM, STI);
548}
549
550// Create a fast isel object.
551FastISel *
553 const TargetLibraryInfo *libInfo) const {
554 const MipsTargetMachine &TM =
555 static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());
556
557 // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
558 bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
561
562 // Disable if either of the following is true:
563 // We do not generate PIC, the ABI is not O32, XGOT is being used.
564 if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
566 UseFastISel = false;
567
568 return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
569}
570
572 EVT VT) const {
573 if (!VT.isVector())
574 return MVT::i32;
576}
577
580 const MipsSubtarget &Subtarget) {
581 if (DCI.isBeforeLegalizeOps())
582 return SDValue();
583
584 EVT Ty = N->getValueType(0);
585 unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
586 unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
587 unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
589 SDLoc DL(N);
590
591 SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
592 N->getOperand(0), N->getOperand(1));
593 SDValue InChain = DAG.getEntryNode();
594 SDValue InGlue = DivRem;
595
596 // insert MFLO
597 if (N->hasAnyUseOfValue(0)) {
598 SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
599 InGlue);
600 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
601 InChain = CopyFromLo.getValue(1);
602 InGlue = CopyFromLo.getValue(2);
603 }
604
605 // insert MFHI
606 if (N->hasAnyUseOfValue(1)) {
607 SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
608 HI, Ty, InGlue);
609 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
610 }
611
612 return SDValue();
613}
614
616 switch (CC) {
617 default: llvm_unreachable("Unknown fp condition code!");
618 case ISD::SETEQ:
619 case ISD::SETOEQ: return Mips::FCOND_OEQ;
620 case ISD::SETUNE: return Mips::FCOND_UNE;
621 case ISD::SETLT:
622 case ISD::SETOLT: return Mips::FCOND_OLT;
623 case ISD::SETGT:
624 case ISD::SETOGT: return Mips::FCOND_OGT;
625 case ISD::SETLE:
626 case ISD::SETOLE: return Mips::FCOND_OLE;
627 case ISD::SETGE:
628 case ISD::SETOGE: return Mips::FCOND_OGE;
629 case ISD::SETULT: return Mips::FCOND_ULT;
630 case ISD::SETULE: return Mips::FCOND_ULE;
631 case ISD::SETUGT: return Mips::FCOND_UGT;
632 case ISD::SETUGE: return Mips::FCOND_UGE;
633 case ISD::SETUO: return Mips::FCOND_UN;
634 case ISD::SETO: return Mips::FCOND_OR;
635 case ISD::SETNE:
636 case ISD::SETONE: return Mips::FCOND_ONE;
637 case ISD::SETUEQ: return Mips::FCOND_UEQ;
638 }
639}
640
641/// This function returns true if the floating point conditional branches and
642/// conditional moves which use condition code CC should be inverted.
644 if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
645 return false;
646
648 "Illegal Condition Code");
649
650 return true;
651}
652
653// Creates and returns an FPCmp node from a setcc node.
654// Returns Op if setcc is not a floating point comparison.
656 // must be a SETCC node
657 if (Op.getOpcode() != ISD::SETCC)
658 return Op;
659
660 SDValue LHS = Op.getOperand(0);
661
662 if (!LHS.getValueType().isFloatingPoint())
663 return Op;
664
665 SDValue RHS = Op.getOperand(1);
666 SDLoc DL(Op);
667
668 // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
669 // node if necessary.
670 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
671
672 return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
673 DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
674}
675
676// Creates and returns a CMovFPT/F node.
678 SDValue False, const SDLoc &DL) {
679 ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
680 bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
681 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
682
683 return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
684 True.getValueType(), True, FCC0, False, Cond);
685}
686
689 const MipsSubtarget &Subtarget) {
690 if (DCI.isBeforeLegalizeOps())
691 return SDValue();
692
693 SDValue SetCC = N->getOperand(0);
694
695 if ((SetCC.getOpcode() != ISD::SETCC) ||
696 !SetCC.getOperand(0).getValueType().isInteger())
697 return SDValue();
698
699 SDValue False = N->getOperand(2);
700 EVT FalseTy = False.getValueType();
701
702 if (!FalseTy.isInteger())
703 return SDValue();
704
705 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);
706
707 // If the RHS (False) is 0, we swap the order of the operands
708 // of ISD::SELECT (obviously also inverting the condition) so that we can
709 // take advantage of conditional moves using the $0 register.
710 // Example:
711 // return (a != 0) ? x : 0;
712 // load $reg, x
713 // movz $reg, $0, a
714 if (!FalseC)
715 return SDValue();
716
717 const SDLoc DL(N);
718
719 if (!FalseC->getZExtValue()) {
720 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
721 SDValue True = N->getOperand(1);
722
723 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
724 SetCC.getOperand(1),
726
727 return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
728 }
729
730 // If both operands are integer constants there's a possibility that we
731 // can do some interesting optimizations.
732 SDValue True = N->getOperand(1);
733 ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);
734
735 if (!TrueC || !True.getValueType().isInteger())
736 return SDValue();
737
  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions as the result
  // of a SETCC operator is always MVT::i32 for non-vector types.
741 if (True.getValueType() == MVT::i64)
742 return SDValue();
743
744 int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();
745
746 // 1) (a < x) ? y : y-1
747 // slti $reg1, a, x
748 // addiu $reg2, $reg1, y-1
749 if (Diff == 1)
750 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);
751
752 // 2) (a < x) ? y-1 : y
753 // slti $reg1, a, x
754 // xor $reg1, $reg1, 1
755 // addiu $reg2, $reg1, y-1
756 if (Diff == -1) {
757 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
758 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
759 SetCC.getOperand(1),
761 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
762 }
763
764 // Could not optimize.
765 return SDValue();
766}
767
770 const MipsSubtarget &Subtarget) {
771 if (DCI.isBeforeLegalizeOps())
772 return SDValue();
773
774 SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
775
776 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
777 if (!FalseC || FalseC->getZExtValue())
778 return SDValue();
779
780 // Since RHS (False) is 0, we swap the order of the True/False operands
781 // (obviously also inverting the condition) so that we can
782 // take advantage of conditional moves using the $0 register.
783 // Example:
784 // return (a != 0) ? x : 0;
785 // load $reg, x
786 // movz $reg, $0, a
787 unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
789
790 SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
791 return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
792 ValueIfFalse, FCC, ValueIfTrue, Glue);
793}
794
797 const MipsSubtarget &Subtarget) {
798 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
799 return SDValue();
800
801 SDValue FirstOperand = N->getOperand(0);
802 unsigned FirstOperandOpc = FirstOperand.getOpcode();
803 SDValue Mask = N->getOperand(1);
804 EVT ValTy = N->getValueType(0);
805 SDLoc DL(N);
806
807 uint64_t Pos = 0;
808 unsigned SMPos, SMSize;
809 ConstantSDNode *CN;
810 SDValue NewOperand;
811 unsigned Opc;
812
813 // Op's second operand must be a shifted mask.
814 if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
815 !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
816 return SDValue();
817
818 if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
819 // Pattern match EXT.
820 // $dst = and ((sra or srl) $src , pos), (2**size - 1)
821 // => ext $dst, $src, pos, size
822
823 // The second operand of the shift must be an immediate.
824 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
825 return SDValue();
826
827 Pos = CN->getZExtValue();
828
829 // Return if the shifted mask does not start at bit 0 or the sum of its size
830 // and Pos exceeds the word's size.
831 if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
832 return SDValue();
833
834 Opc = MipsISD::Ext;
835 NewOperand = FirstOperand.getOperand(0);
836 } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
837 // Pattern match CINS.
838 // $dst = and (shl $src , pos), mask
839 // => cins $dst, $src, pos, size
840 // mask is a shifted mask with consecutive 1's, pos = shift amount,
841 // size = population count.
842
843 // The second operand of the shift must be an immediate.
844 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
845 return SDValue();
846
847 Pos = CN->getZExtValue();
848
849 if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
850 Pos + SMSize > ValTy.getSizeInBits())
851 return SDValue();
852
853 NewOperand = FirstOperand.getOperand(0);
854 // SMSize is 'location' (position) in this case, not size.
855 SMSize--;
856 Opc = MipsISD::CIns;
857 } else {
858 // Pattern match EXT.
859 // $dst = and $src, (2**size - 1) , if size > 16
860 // => ext $dst, $src, pos, size , pos = 0
861
862 // If the mask is <= 0xffff, andi can be used instead.
863 if (CN->getZExtValue() <= 0xffff)
864 return SDValue();
865
866 // Return if the mask doesn't start at position 0.
867 if (SMPos)
868 return SDValue();
869
870 Opc = MipsISD::Ext;
871 NewOperand = FirstOperand;
872 }
873 return DAG.getNode(Opc, DL, ValTy, NewOperand,
874 DAG.getConstant(Pos, DL, MVT::i32),
875 DAG.getConstant(SMSize, DL, MVT::i32));
876}
877
880 const MipsSubtarget &Subtarget) {
881 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
882 return SDValue();
883
884 SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);
885 unsigned SMPos0, SMSize0, SMPos1, SMSize1;
886 ConstantSDNode *CN, *CN1;
887
888 if ((FirstOperand.getOpcode() == ISD::AND &&
889 SecondOperand.getOpcode() == ISD::SHL) ||
890 (FirstOperand.getOpcode() == ISD::SHL &&
891 SecondOperand.getOpcode() == ISD::AND)) {
892 // Pattern match INS.
893 // $dst = or (and $src1, (2**size0 - 1)), (shl $src2, size0)
894 // ==> ins $src1, $src2, pos, size, pos = size0, size = 32 - pos;
895 // Or:
896 // $dst = or (shl $src2, size0), (and $src1, (2**size0 - 1))
897 // ==> ins $src1, $src2, pos, size, pos = size0, size = 32 - pos;
898 SDValue AndOperand0 = FirstOperand.getOpcode() == ISD::AND
899 ? FirstOperand.getOperand(0)
900 : SecondOperand.getOperand(0);
901 SDValue ShlOperand0 = FirstOperand.getOpcode() == ISD::AND
902 ? SecondOperand.getOperand(0)
903 : FirstOperand.getOperand(0);
904 SDValue AndMask = FirstOperand.getOpcode() == ISD::AND
905 ? FirstOperand.getOperand(1)
906 : SecondOperand.getOperand(1);
907 if (!(CN = dyn_cast<ConstantSDNode>(AndMask)) ||
908 !isShiftedMask_64(CN->getZExtValue(), SMPos0, SMSize0))
909 return SDValue();
910
911 SDValue ShlShift = FirstOperand.getOpcode() == ISD::AND
912 ? SecondOperand.getOperand(1)
913 : FirstOperand.getOperand(1);
914 if (!(CN = dyn_cast<ConstantSDNode>(ShlShift)))
915 return SDValue();
916 uint64_t ShlShiftValue = CN->getZExtValue();
917
918 if (SMPos0 != 0 || SMSize0 != ShlShiftValue)
919 return SDValue();
920
921 SDLoc DL(N);
922 EVT ValTy = N->getValueType(0);
923 SMPos1 = ShlShiftValue;
924 assert(SMPos1 < ValTy.getSizeInBits());
925 SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;
926 return DAG.getNode(MipsISD::Ins, DL, ValTy, ShlOperand0,
927 DAG.getConstant(SMPos1, DL, MVT::i32),
928 DAG.getConstant(SMSize1, DL, MVT::i32), AndOperand0);
929 }
930
931 // See if Op's first operand matches (and $src1 , mask0).
932 if (FirstOperand.getOpcode() != ISD::AND)
933 return SDValue();
934
935 // Pattern match INS.
936 // $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
937 // where mask1 = (2**size - 1) << pos, mask0 = ~mask1
938 // => ins $dst, $src, size, pos, $src1
939 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
940 !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
941 return SDValue();
942
943 // See if Op's second operand matches (and (shl $src, pos), mask1).
944 if (SecondOperand.getOpcode() == ISD::AND &&
945 SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {
946
947 if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand.getOperand(1))) ||
948 !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
949 return SDValue();
950
951 // The shift masks must have the same position and size.
952 if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
953 return SDValue();
954
955 SDValue Shl = SecondOperand.getOperand(0);
956
957 if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
958 return SDValue();
959
960 unsigned Shamt = CN->getZExtValue();
961
962 // Return if the shift amount and the first bit position of mask are not the
963 // same.
964 EVT ValTy = N->getValueType(0);
965 if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
966 return SDValue();
967
968 SDLoc DL(N);
969 return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
970 DAG.getConstant(SMPos0, DL, MVT::i32),
971 DAG.getConstant(SMSize0, DL, MVT::i32),
972 FirstOperand.getOperand(0));
973 } else {
974 // Pattern match DINS.
975 // $dst = or (and $src, mask0), mask1
976 // where mask0 = ((1 << SMSize0) -1) << SMPos0
977 // => dins $dst, $src, pos, size
978 if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
979 ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
980 (SMSize0 + SMPos0 <= 32))) {
981 // Check if AND instruction has constant as argument
982 bool isConstCase = SecondOperand.getOpcode() != ISD::AND;
983 if (SecondOperand.getOpcode() == ISD::AND) {
984 if (!(CN1 = dyn_cast<ConstantSDNode>(SecondOperand->getOperand(1))))
985 return SDValue();
986 } else {
987 if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
988 return SDValue();
989 }
990 // Don't generate INS if constant OR operand doesn't fit into bits
991 // cleared by constant AND operand.
992 if (CN->getSExtValue() & CN1->getSExtValue())
993 return SDValue();
994
995 SDLoc DL(N);
996 EVT ValTy = N->getOperand(0)->getValueType(0);
997 SDValue Const1;
998 SDValue SrlX;
999 if (!isConstCase) {
1000 Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
1001 SrlX = DAG.getNode(ISD::SRL, DL, SecondOperand->getValueType(0),
1002 SecondOperand, Const1);
1003 }
1004 return DAG.getNode(
1005 MipsISD::Ins, DL, N->getValueType(0),
1006 isConstCase
1007 ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
1008 : SrlX,
1009 DAG.getConstant(SMPos0, DL, MVT::i32),
1010 DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
1011 : SMSize0,
1012 DL, MVT::i32),
1013 FirstOperand->getOperand(0));
1014 }
1015 return SDValue();
1016 }
1017}
1018
1020 const MipsSubtarget &Subtarget) {
1021 // ROOTNode must have a multiplication as an operand for the match to be
1022 // successful.
1023 if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
1024 ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
1025 return SDValue();
1026
  // In the case where we have a multiplication as the left operand of
  // a subtraction, we can't combine into a MipsISD::MSub node as the
  // instruction definition of msub(u) places the multiplication on
  // the right.
1031 if (ROOTNode->getOpcode() == ISD::SUB &&
1032 ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
1033 return SDValue();
1034
1035 // We don't handle vector types here.
1036 if (ROOTNode->getValueType(0).isVector())
1037 return SDValue();
1038
  // For MIPS64, madd / msub instructions are inefficient to use with 64 bit
1040 // arithmetic. E.g.
1041 // (add (mul a b) c) =>
1042 // let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
1043 // MIPS64: (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
1044 // or
1045 // MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
1046 //
1047 // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
1049 // problem is that Hi/Lo contain the upper and lower 32 bits of the
1050 // operand and result.
1051 //
1052 // It requires a chain of 4 add/mul for MIPS64R2 to get better code
1053 // density than doing it naively, 5 for MIPS64. Additionally, using
1054 // madd/msub on MIPS64 requires the operands actually be 32 bit sign
1055 // extended operands, not true 64 bit values.
1056 //
1057 // FIXME: For the moment, disable this completely for MIPS64.
1058 if (Subtarget.hasMips64())
1059 return SDValue();
1060
1061 SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
1062 ? ROOTNode->getOperand(0)
1063 : ROOTNode->getOperand(1);
1064
1065 SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
1066 ? ROOTNode->getOperand(1)
1067 : ROOTNode->getOperand(0);
1068
1069 // Transform this to a MADD only if the user of this node is the add.
1070 // If there are other users of the mul, this function returns here.
1071 if (!Mult.hasOneUse())
1072 return SDValue();
1073
1074 // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
1075 // must be in canonical form, i.e. sign extended. For MIPS32, the operands
1076 // of the multiply must have 32 or more sign bits, otherwise we cannot
1077 // perform this optimization. We have to check this here as we're performing
1078 // this optimization pre-legalization.
1079 SDValue MultLHS = Mult->getOperand(0);
1080 SDValue MultRHS = Mult->getOperand(1);
1081
1082 bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
1083 MultRHS->getOpcode() == ISD::SIGN_EXTEND;
1084 bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
1085 MultRHS->getOpcode() == ISD::ZERO_EXTEND;
1086
1087 if (!IsSigned && !IsUnsigned)
1088 return SDValue();
1089
1090 // Initialize accumulator.
1091 SDLoc DL(ROOTNode);
1092 SDValue BottomHalf, TopHalf;
1093 std::tie(BottomHalf, TopHalf) =
1094 CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
1095 SDValue ACCIn =
1096 CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);
1097
1098 // Create MipsMAdd(u) / MipsMSub(u) node.
1099 bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
1100 unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
1101 : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
1102 SDValue MAddOps[3] = {
1103 CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
1104 CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
1105 SDValue MAdd = CurDAG.getNode(Opcode, DL, MVT::Untyped, MAddOps);
1106
1107 SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
1108 SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
1109 SDValue Combined =
1110 CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
1111 return Combined;
1112}
1113
1116 const MipsSubtarget &Subtarget) {
1117 // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
1118 if (DCI.isBeforeLegalizeOps()) {
1119 if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
1120 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1121 return performMADD_MSUBCombine(N, DAG, Subtarget);
1122
1123 return SDValue();
1124 }
1125
1126 return SDValue();
1127}
1128
1131 const MipsSubtarget &Subtarget) {
1132 // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
1133 if (DCI.isBeforeLegalizeOps()) {
1134 if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
1135 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1136 return performMADD_MSUBCombine(N, DAG, Subtarget);
1137
1138 return SDValue();
1139 }
1140
1141 // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
1142 SDValue Add = N->getOperand(1);
1143
1144 if (Add.getOpcode() != ISD::ADD)
1145 return SDValue();
1146
1147 SDValue Lo = Add.getOperand(1);
1148
1149 if ((Lo.getOpcode() != MipsISD::Lo) ||
1150 (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
1151 return SDValue();
1152
1153 EVT ValTy = N->getValueType(0);
1154 SDLoc DL(N);
1155
1156 SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
1157 Add.getOperand(0));
1158 return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
1159}
1160
1163 const MipsSubtarget &Subtarget) {
1164 // Pattern match CINS.
1165 // $dst = shl (and $src , imm), pos
1166 // => cins $dst, $src, pos, size
1167
1168 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
1169 return SDValue();
1170
1171 SDValue FirstOperand = N->getOperand(0);
1172 unsigned FirstOperandOpc = FirstOperand.getOpcode();
1173 SDValue SecondOperand = N->getOperand(1);
1174 EVT ValTy = N->getValueType(0);
1175 SDLoc DL(N);
1176
1177 uint64_t Pos = 0;
1178 unsigned SMPos, SMSize;
1179 ConstantSDNode *CN;
1180 SDValue NewOperand;
1181
1182 // The second operand of the shift must be an immediate.
1183 if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
1184 return SDValue();
1185
1186 Pos = CN->getZExtValue();
1187
1188 if (Pos >= ValTy.getSizeInBits())
1189 return SDValue();
1190
1191 if (FirstOperandOpc != ISD::AND)
1192 return SDValue();
1193
1194 // AND's second operand must be a shifted mask.
1195 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
1196 !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
1197 return SDValue();
1198
1199 // Return if the shifted mask does not start at bit 0 or the sum of its size
1200 // and Pos exceeds the word's size.
1201 if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
1202 return SDValue();
1203
1204 NewOperand = FirstOperand.getOperand(0);
1205 // SMSize is 'location' (position) in this case, not size.
1206 SMSize--;
1207
1208 return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
1209 DAG.getConstant(Pos, DL, MVT::i32),
1210 DAG.getConstant(SMSize, DL, MVT::i32));
1211}
1212
1214 const {
1215 SelectionDAG &DAG = DCI.DAG;
1216 unsigned Opc = N->getOpcode();
1217
1218 switch (Opc) {
1219 default: break;
1220 case ISD::SDIVREM:
1221 case ISD::UDIVREM:
1222 return performDivRemCombine(N, DAG, DCI, Subtarget);
1223 case ISD::SELECT:
1224 return performSELECTCombine(N, DAG, DCI, Subtarget);
1225 case MipsISD::CMovFP_F:
1226 case MipsISD::CMovFP_T:
1227 return performCMovFPCombine(N, DAG, DCI, Subtarget);
1228 case ISD::AND:
1229 return performANDCombine(N, DAG, DCI, Subtarget);
1230 case ISD::OR:
1231 return performORCombine(N, DAG, DCI, Subtarget);
1232 case ISD::ADD:
1233 return performADDCombine(N, DAG, DCI, Subtarget);
1234 case ISD::SHL:
1235 return performSHLCombine(N, DAG, DCI, Subtarget);
1236 case ISD::SUB:
1237 return performSUBCombine(N, DAG, DCI, Subtarget);
1238 }
1239
1240 return SDValue();
1241}
1242
1244 return Subtarget.hasMips32();
1245}
1246
1248 return Subtarget.hasMips32();
1249}
1250
1252 // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
1253 // For MIPSR2 or later, we may be able to use the `ext` instruction or its'
1254 // double-word variants.
1255 if (auto *C = dyn_cast<ConstantSDNode>(Y))
1256 return C->getAPIntValue().ule(15);
1257
1258 return false;
1259}
1260
1262 const SDNode *N, CombineLevel Level) const {
1263 assert(((N->getOpcode() == ISD::SHL &&
1264 N->getOperand(0).getOpcode() == ISD::SRL) ||
1265 (N->getOpcode() == ISD::SRL &&
1266 N->getOperand(0).getOpcode() == ISD::SHL)) &&
1267 "Expected shift-shift mask");
1268
1269 if (N->getOperand(0).getValueType().isVector())
1270 return false;
1271 return true;
1272}
1273
1274void
1277 SelectionDAG &DAG) const {
1278 return LowerOperationWrapper(N, Results, DAG);
1279}
1280
1283{
1284 switch (Op.getOpcode())
1285 {
1286 case ISD::BRCOND: return lowerBRCOND(Op, DAG);
1287 case ISD::ConstantPool: return lowerConstantPool(Op, DAG);
1288 case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
1289 case ISD::BlockAddress: return lowerBlockAddress(Op, DAG);
1290 case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
1291 case ISD::JumpTable: return lowerJumpTable(Op, DAG);
1292 case ISD::SELECT: return lowerSELECT(Op, DAG);
1293 case ISD::SETCC: return lowerSETCC(Op, DAG);
1294 case ISD::VASTART: return lowerVASTART(Op, DAG);
1295 case ISD::VAARG: return lowerVAARG(Op, DAG);
1296 case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
1297 case ISD::FABS: return lowerFABS(Op, DAG);
1298 case ISD::FCANONICALIZE:
1299 return lowerFCANONICALIZE(Op, DAG);
1300 case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
1301 case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
1302 case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
1303 case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
1304 case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
1305 case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);
1306 case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);
1307 case ISD::LOAD: return lowerLOAD(Op, DAG);
1308 case ISD::STORE: return lowerSTORE(Op, DAG);
1309 case ISD::EH_DWARF_CFA: return lowerEH_DWARF_CFA(Op, DAG);
1310 case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
1311 }
1312 return SDValue();
1313}
1314
1315//===----------------------------------------------------------------------===//
1316// Lower helper functions
1317//===----------------------------------------------------------------------===//
1318
1319// addLiveIn - This helper function adds the specified physical register to the
1320// MachineFunction as a live in value. It also creates a corresponding
1321// virtual register for it.
1322static unsigned
1323addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
1324{
1326 MF.getRegInfo().addLiveIn(PReg, VReg);
1327 return VReg;
1328}
1329
1332 const TargetInstrInfo &TII,
1333 bool Is64Bit, bool IsMicroMips) {
1334 if (NoZeroDivCheck)
1335 return &MBB;
1336
1337 // Insert instruction "teq $divisor_reg, $zero, 7".
1340 MachineOperand &Divisor = MI.getOperand(2);
1341 MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
1342 TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
1343 .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
1344 .addReg(Mips::ZERO)
1345 .addImm(7);
1346
1347 // Use the 32-bit sub-register if this is a 64-bit division.
1348 if (Is64Bit)
1349 MIB->getOperand(0).setSubReg(Mips::sub_32);
1350
1351 // Clear Divisor's kill flag.
1352 Divisor.setIsKill(false);
1353
1354 // We would normally delete the original instruction here but in this case
1355 // we only needed to inject an additional instruction rather than replace it.
1356
1357 return &MBB;
1358}
1359
1362 MachineBasicBlock *BB) const {
1363 switch (MI.getOpcode()) {
1364 default:
1365 llvm_unreachable("Unexpected instr type to insert");
1366 case Mips::ATOMIC_LOAD_ADD_I8:
1367 return emitAtomicBinaryPartword(MI, BB, 1);
1368 case Mips::ATOMIC_LOAD_ADD_I16:
1369 return emitAtomicBinaryPartword(MI, BB, 2);
1370 case Mips::ATOMIC_LOAD_ADD_I32:
1371 return emitAtomicBinary(MI, BB);
1372 case Mips::ATOMIC_LOAD_ADD_I64:
1373 return emitAtomicBinary(MI, BB);
1374
1375 case Mips::ATOMIC_LOAD_AND_I8:
1376 return emitAtomicBinaryPartword(MI, BB, 1);
1377 case Mips::ATOMIC_LOAD_AND_I16:
1378 return emitAtomicBinaryPartword(MI, BB, 2);
1379 case Mips::ATOMIC_LOAD_AND_I32:
1380 return emitAtomicBinary(MI, BB);
1381 case Mips::ATOMIC_LOAD_AND_I64:
1382 return emitAtomicBinary(MI, BB);
1383
1384 case Mips::ATOMIC_LOAD_OR_I8:
1385 return emitAtomicBinaryPartword(MI, BB, 1);
1386 case Mips::ATOMIC_LOAD_OR_I16:
1387 return emitAtomicBinaryPartword(MI, BB, 2);
1388 case Mips::ATOMIC_LOAD_OR_I32:
1389 return emitAtomicBinary(MI, BB);
1390 case Mips::ATOMIC_LOAD_OR_I64:
1391 return emitAtomicBinary(MI, BB);
1392
1393 case Mips::ATOMIC_LOAD_XOR_I8:
1394 return emitAtomicBinaryPartword(MI, BB, 1);
1395 case Mips::ATOMIC_LOAD_XOR_I16:
1396 return emitAtomicBinaryPartword(MI, BB, 2);
1397 case Mips::ATOMIC_LOAD_XOR_I32:
1398 return emitAtomicBinary(MI, BB);
1399 case Mips::ATOMIC_LOAD_XOR_I64:
1400 return emitAtomicBinary(MI, BB);
1401
1402 case Mips::ATOMIC_LOAD_NAND_I8:
1403 return emitAtomicBinaryPartword(MI, BB, 1);
1404 case Mips::ATOMIC_LOAD_NAND_I16:
1405 return emitAtomicBinaryPartword(MI, BB, 2);
1406 case Mips::ATOMIC_LOAD_NAND_I32:
1407 return emitAtomicBinary(MI, BB);
1408 case Mips::ATOMIC_LOAD_NAND_I64:
1409 return emitAtomicBinary(MI, BB);
1410
1411 case Mips::ATOMIC_LOAD_SUB_I8:
1412 return emitAtomicBinaryPartword(MI, BB, 1);
1413 case Mips::ATOMIC_LOAD_SUB_I16:
1414 return emitAtomicBinaryPartword(MI, BB, 2);
1415 case Mips::ATOMIC_LOAD_SUB_I32:
1416 return emitAtomicBinary(MI, BB);
1417 case Mips::ATOMIC_LOAD_SUB_I64:
1418 return emitAtomicBinary(MI, BB);
1419
1420 case Mips::ATOMIC_SWAP_I8:
1421 return emitAtomicBinaryPartword(MI, BB, 1);
1422 case Mips::ATOMIC_SWAP_I16:
1423 return emitAtomicBinaryPartword(MI, BB, 2);
1424 case Mips::ATOMIC_SWAP_I32:
1425 return emitAtomicBinary(MI, BB);
1426 case Mips::ATOMIC_SWAP_I64:
1427 return emitAtomicBinary(MI, BB);
1428
1429 case Mips::ATOMIC_CMP_SWAP_I8:
1430 return emitAtomicCmpSwapPartword(MI, BB, 1);
1431 case Mips::ATOMIC_CMP_SWAP_I16:
1432 return emitAtomicCmpSwapPartword(MI, BB, 2);
1433 case Mips::ATOMIC_CMP_SWAP_I32:
1434 return emitAtomicCmpSwap(MI, BB);
1435 case Mips::ATOMIC_CMP_SWAP_I64:
1436 return emitAtomicCmpSwap(MI, BB);
1437
1438 case Mips::ATOMIC_LOAD_MIN_I8:
1439 return emitAtomicBinaryPartword(MI, BB, 1);
1440 case Mips::ATOMIC_LOAD_MIN_I16:
1441 return emitAtomicBinaryPartword(MI, BB, 2);
1442 case Mips::ATOMIC_LOAD_MIN_I32:
1443 return emitAtomicBinary(MI, BB);
1444 case Mips::ATOMIC_LOAD_MIN_I64:
1445 return emitAtomicBinary(MI, BB);
1446
1447 case Mips::ATOMIC_LOAD_MAX_I8:
1448 return emitAtomicBinaryPartword(MI, BB, 1);
1449 case Mips::ATOMIC_LOAD_MAX_I16:
1450 return emitAtomicBinaryPartword(MI, BB, 2);
1451 case Mips::ATOMIC_LOAD_MAX_I32:
1452 return emitAtomicBinary(MI, BB);
1453 case Mips::ATOMIC_LOAD_MAX_I64:
1454 return emitAtomicBinary(MI, BB);
1455
1456 case Mips::ATOMIC_LOAD_UMIN_I8:
1457 return emitAtomicBinaryPartword(MI, BB, 1);
1458 case Mips::ATOMIC_LOAD_UMIN_I16:
1459 return emitAtomicBinaryPartword(MI, BB, 2);
1460 case Mips::ATOMIC_LOAD_UMIN_I32:
1461 return emitAtomicBinary(MI, BB);
1462 case Mips::ATOMIC_LOAD_UMIN_I64:
1463 return emitAtomicBinary(MI, BB);
1464
1465 case Mips::ATOMIC_LOAD_UMAX_I8:
1466 return emitAtomicBinaryPartword(MI, BB, 1);
1467 case Mips::ATOMIC_LOAD_UMAX_I16:
1468 return emitAtomicBinaryPartword(MI, BB, 2);
1469 case Mips::ATOMIC_LOAD_UMAX_I32:
1470 return emitAtomicBinary(MI, BB);
1471 case Mips::ATOMIC_LOAD_UMAX_I64:
1472 return emitAtomicBinary(MI, BB);
1473
1474 case Mips::PseudoSDIV:
1475 case Mips::PseudoUDIV:
1476 case Mips::DIV:
1477 case Mips::DIVU:
1478 case Mips::MOD:
1479 case Mips::MODU:
1480 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
1481 false);
1482 case Mips::SDIV_MM_Pseudo:
1483 case Mips::UDIV_MM_Pseudo:
1484 case Mips::SDIV_MM:
1485 case Mips::UDIV_MM:
1486 case Mips::DIV_MMR6:
1487 case Mips::DIVU_MMR6:
1488 case Mips::MOD_MMR6:
1489 case Mips::MODU_MMR6:
1490 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
1491 case Mips::PseudoDSDIV:
1492 case Mips::PseudoDUDIV:
1493 case Mips::DDIV:
1494 case Mips::DDIVU:
1495 case Mips::DMOD:
1496 case Mips::DMODU:
1497 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);
1498
1499 case Mips::PseudoSELECT_I:
1500 case Mips::PseudoSELECT_I64:
1501 case Mips::PseudoSELECT_S:
1502 case Mips::PseudoSELECT_D32:
1503 case Mips::PseudoSELECT_D64:
1504 return emitPseudoSELECT(MI, BB, false, Mips::BNE);
1505 case Mips::PseudoSELECTFP_F_I:
1506 case Mips::PseudoSELECTFP_F_I64:
1507 case Mips::PseudoSELECTFP_F_S:
1508 case Mips::PseudoSELECTFP_F_D32:
1509 case Mips::PseudoSELECTFP_F_D64:
1510 return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
1511 case Mips::PseudoSELECTFP_T_I:
1512 case Mips::PseudoSELECTFP_T_I64:
1513 case Mips::PseudoSELECTFP_T_S:
1514 case Mips::PseudoSELECTFP_T_D32:
1515 case Mips::PseudoSELECTFP_T_D64:
1516 return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
1517 case Mips::PseudoD_SELECT_I:
1518 case Mips::PseudoD_SELECT_I64:
1519 return emitPseudoD_SELECT(MI, BB);
1520 case Mips::LDR_W:
1521 return emitLDR_W(MI, BB);
1522 case Mips::LDR_D:
1523 return emitLDR_D(MI, BB);
1524 case Mips::STR_W:
1525 return emitSTR_W(MI, BB);
1526 case Mips::STR_D:
1527 return emitSTR_D(MI, BB);
1528 }
1529}
1530
1531// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
1532// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
1534MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
1535 MachineBasicBlock *BB) const {
1536
1537 MachineFunction *MF = BB->getParent();
1538 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1540 DebugLoc DL = MI.getDebugLoc();
1541
1542 unsigned AtomicOp;
1543 bool NeedsAdditionalReg = false;
1544 switch (MI.getOpcode()) {
1545 case Mips::ATOMIC_LOAD_ADD_I32:
1546 AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
1547 break;
1548 case Mips::ATOMIC_LOAD_SUB_I32:
1549 AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
1550 break;
1551 case Mips::ATOMIC_LOAD_AND_I32:
1552 AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
1553 break;
1554 case Mips::ATOMIC_LOAD_OR_I32:
1555 AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
1556 break;
1557 case Mips::ATOMIC_LOAD_XOR_I32:
1558 AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
1559 break;
1560 case Mips::ATOMIC_LOAD_NAND_I32:
1561 AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
1562 break;
1563 case Mips::ATOMIC_SWAP_I32:
1564 AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
1565 break;
1566 case Mips::ATOMIC_LOAD_ADD_I64:
1567 AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
1568 break;
1569 case Mips::ATOMIC_LOAD_SUB_I64:
1570 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
1571 break;
1572 case Mips::ATOMIC_LOAD_AND_I64:
1573 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
1574 break;
1575 case Mips::ATOMIC_LOAD_OR_I64:
1576 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
1577 break;
1578 case Mips::ATOMIC_LOAD_XOR_I64:
1579 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
1580 break;
1581 case Mips::ATOMIC_LOAD_NAND_I64:
1582 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
1583 break;
1584 case Mips::ATOMIC_SWAP_I64:
1585 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
1586 break;
1587 case Mips::ATOMIC_LOAD_MIN_I32:
1588 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
1589 NeedsAdditionalReg = true;
1590 break;
1591 case Mips::ATOMIC_LOAD_MAX_I32:
1592 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
1593 NeedsAdditionalReg = true;
1594 break;
1595 case Mips::ATOMIC_LOAD_UMIN_I32:
1596 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
1597 NeedsAdditionalReg = true;
1598 break;
1599 case Mips::ATOMIC_LOAD_UMAX_I32:
1600 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
1601 NeedsAdditionalReg = true;
1602 break;
1603 case Mips::ATOMIC_LOAD_MIN_I64:
1604 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
1605 NeedsAdditionalReg = true;
1606 break;
1607 case Mips::ATOMIC_LOAD_MAX_I64:
1608 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
1609 NeedsAdditionalReg = true;
1610 break;
1611 case Mips::ATOMIC_LOAD_UMIN_I64:
1612 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
1613 NeedsAdditionalReg = true;
1614 break;
1615 case Mips::ATOMIC_LOAD_UMAX_I64:
1616 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
1617 NeedsAdditionalReg = true;
1618 break;
1619 default:
1620 llvm_unreachable("Unknown pseudo atomic for replacement!");
1621 }
1622
1623 Register OldVal = MI.getOperand(0).getReg();
1624 Register Ptr = MI.getOperand(1).getReg();
1625 Register Incr = MI.getOperand(2).getReg();
1626 Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1627
1629
1630 // The scratch registers here with the EarlyClobber | Define | Implicit
1631 // flags is used to persuade the register allocator and the machine
1632 // verifier to accept the usage of this register. This has to be a real
1633 // register which has an UNDEF value but is dead after the instruction which
1634 // is unique among the registers chosen for the instruction.
1635
1636 // The EarlyClobber flag has the semantic properties that the operand it is
1637 // attached to is clobbered before the rest of the inputs are read. Hence it
1638 // must be unique among the operands to the instruction.
1639 // The Define flag is needed to coerce the machine verifier that an Undef
1640 // value isn't a problem.
1641 // The Dead flag is needed as the value in scratch isn't used by any other
1642 // instruction. Kill isn't used as Dead is more precise.
1643 // The implicit flag is here due to the interaction between the other flags
1644 // and the machine verifier.
1645
1646 // For correctness purpose, a new pseudo is introduced here. We need this
1647 // new pseudo, so that FastRegisterAllocator does not see an ll/sc sequence
1648 // that is spread over >1 basic blocks. A register allocator which
1649 // introduces (or any codegen infact) a store, can violate the expectations
1650 // of the hardware.
1651 //
1652 // An atomic read-modify-write sequence starts with a linked load
1653 // instruction and ends with a store conditional instruction. The atomic
1654 // read-modify-write sequence fails if any of the following conditions
1655 // occur between the execution of ll and sc:
1656 // * A coherent store is completed by another process or coherent I/O
1657 // module into the block of synchronizable physical memory containing
1658 // the word. The size and alignment of the block is
1659 // implementation-dependent.
1660 // * A coherent store is executed between an LL and SC sequence on the
1661 // same processor to the block of synchornizable physical memory
1662 // containing the word.
1663 //
1664
1665 Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
1666 Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));
1667
1668 BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
1669 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1670
1672 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1674 .addReg(PtrCopy)
1675 .addReg(IncrCopy)
1678 if (NeedsAdditionalReg) {
1679 Register Scratch2 =
1680 RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1683 }
1684
1685 MI.eraseFromParent();
1686
1687 return BB;
1688}
1689
1690MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
1691 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
1692 unsigned SrcReg) const {
1694 const DebugLoc &DL = MI.getDebugLoc();
1695
1696 if (Subtarget.hasMips32r2() && Size == 1) {
1697 BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
1698 return BB;
1699 }
1700
1701 if (Subtarget.hasMips32r2() && Size == 2) {
1702 BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
1703 return BB;
1704 }
1705
1706 MachineFunction *MF = BB->getParent();
1708 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1709 Register ScrReg = RegInfo.createVirtualRegister(RC);
1710
1711 assert(Size < 32);
1712 int64_t ShiftImm = 32 - (Size * 8);
1713
1714 BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
1715 BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);
1716
1717 return BB;
1718}
1719
1720MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
1721 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1722 assert((Size == 1 || Size == 2) &&
1723 "Unsupported size for EmitAtomicBinaryPartial.");
1724
1725 MachineFunction *MF = BB->getParent();
1727 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1728 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1729 const TargetRegisterClass *RCp =
1730 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1732 DebugLoc DL = MI.getDebugLoc();
1733
1734 Register Dest = MI.getOperand(0).getReg();
1735 Register Ptr = MI.getOperand(1).getReg();
1736 Register Incr = MI.getOperand(2).getReg();
1737
1738 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1739 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1740 Register Mask = RegInfo.createVirtualRegister(RC);
1741 Register Mask2 = RegInfo.createVirtualRegister(RC);
1742 Register Incr2 = RegInfo.createVirtualRegister(RC);
1743 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1744 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1745 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1746 Register Scratch = RegInfo.createVirtualRegister(RC);
1747 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1748 Register Scratch3 = RegInfo.createVirtualRegister(RC);
1749
1750 unsigned AtomicOp = 0;
1751 bool NeedsAdditionalReg = false;
1752 switch (MI.getOpcode()) {
1753 case Mips::ATOMIC_LOAD_NAND_I8:
1754 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1755 break;
1756 case Mips::ATOMIC_LOAD_NAND_I16:
1757 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1758 break;
1759 case Mips::ATOMIC_SWAP_I8:
1760 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1761 break;
1762 case Mips::ATOMIC_SWAP_I16:
1763 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1764 break;
1765 case Mips::ATOMIC_LOAD_ADD_I8:
1766 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1767 break;
1768 case Mips::ATOMIC_LOAD_ADD_I16:
1769 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1770 break;
1771 case Mips::ATOMIC_LOAD_SUB_I8:
1772 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1773 break;
1774 case Mips::ATOMIC_LOAD_SUB_I16:
1775 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1776 break;
1777 case Mips::ATOMIC_LOAD_AND_I8:
1778 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1779 break;
1780 case Mips::ATOMIC_LOAD_AND_I16:
1781 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1782 break;
1783 case Mips::ATOMIC_LOAD_OR_I8:
1784 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1785 break;
1786 case Mips::ATOMIC_LOAD_OR_I16:
1787 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1788 break;
1789 case Mips::ATOMIC_LOAD_XOR_I8:
1790 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1791 break;
1792 case Mips::ATOMIC_LOAD_XOR_I16:
1793 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1794 break;
1795 case Mips::ATOMIC_LOAD_MIN_I8:
1796 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1797 NeedsAdditionalReg = true;
1798 break;
1799 case Mips::ATOMIC_LOAD_MIN_I16:
1800 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1801 NeedsAdditionalReg = true;
1802 break;
1803 case Mips::ATOMIC_LOAD_MAX_I8:
1804 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1805 NeedsAdditionalReg = true;
1806 break;
1807 case Mips::ATOMIC_LOAD_MAX_I16:
1808 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1809 NeedsAdditionalReg = true;
1810 break;
1811 case Mips::ATOMIC_LOAD_UMIN_I8:
1812 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1813 NeedsAdditionalReg = true;
1814 break;
1815 case Mips::ATOMIC_LOAD_UMIN_I16:
1816 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1817 NeedsAdditionalReg = true;
1818 break;
1819 case Mips::ATOMIC_LOAD_UMAX_I8:
1820 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1821 NeedsAdditionalReg = true;
1822 break;
1823 case Mips::ATOMIC_LOAD_UMAX_I16:
1824 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1825 NeedsAdditionalReg = true;
1826 break;
1827 default:
1828 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
1829 }
1830
1831 // insert new blocks after the current block
1832 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1833 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1835 MF->insert(It, exitMBB);
1836
1837 // Transfer the remainder of BB and its successor edges to exitMBB.
1838 exitMBB->splice(exitMBB->begin(), BB,
1839 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1841
1843
1844 // thisMBB:
1845 // addiu masklsb2,$0,-4 # 0xfffffffc
1846 // and alignedaddr,ptr,masklsb2
1847 // andi ptrlsb2,ptr,3
1848 // sll shiftamt,ptrlsb2,3
1849 // ori maskupper,$0,255 # 0xff
1850 // sll mask,maskupper,shiftamt
1851 // nor mask2,$0,mask
1852 // sll incr2,incr,shiftamt
1853
1854 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1855 BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
1856 .addReg(ABI.GetNullPtr()).addImm(-4);
1857 BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
1858 .addReg(Ptr).addReg(MaskLSB2);
1859 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1860 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1861 if (Subtarget.isLittle()) {
1862 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1863 } else {
1864 Register Off = RegInfo.createVirtualRegister(RC);
1865 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1866 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1867 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1868 }
1869 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1870 .addReg(Mips::ZERO).addImm(MaskImm);
1871 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1872 .addReg(MaskUpper).addReg(ShiftAmt);
1873 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1874 BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
1875
1876
1877 // The purposes of the flags on the scratch registers is explained in
1878 // emitAtomicBinary. In summary, we need a scratch register which is going to
1879 // be undef, that is unique among registers chosen for the instruction.
1880
1882 BuildMI(BB, DL, TII->get(AtomicOp))
1884 .addReg(AlignedAddr)
1885 .addReg(Incr2)
1886 .addReg(Mask)
1887 .addReg(Mask2)
1888 .addReg(ShiftAmt)
1895 if (NeedsAdditionalReg) {
1896 Register Scratch4 = RegInfo.createVirtualRegister(RC);
1899 }
1900
1901 MI.eraseFromParent(); // The instruction is gone now.
1902
1903 return exitMBB;
1904}
1905
1906// Lower atomic compare and swap to a pseudo instruction, taking care to
1907// define a scratch register for the pseudo instruction's expansion. The
1908// instruction is expanded after the register allocator as to prevent
1909// the insertion of stores between the linked load and the store conditional.
1910
1912MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1913 MachineBasicBlock *BB) const {
1914
1915 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1916 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1917 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1918
1919 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1920
1921 MachineFunction *MF = BB->getParent();
1925 DebugLoc DL = MI.getDebugLoc();
1926
1927 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1928 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1929 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1930 Register Dest = MI.getOperand(0).getReg();
1931 Register Ptr = MI.getOperand(1).getReg();
1932 Register OldVal = MI.getOperand(2).getReg();
1933 Register NewVal = MI.getOperand(3).getReg();
1934
1935 Register Scratch = MRI.createVirtualRegister(RC);
1937
1938 // We need to create copies of the various registers and kill them at the
1939 // atomic pseudo. If the copies are not made, when the atomic is expanded
1940 // after fast register allocation, the spills will end up outside of the
1941 // blocks that their values are defined in, causing livein errors.
1942
1943 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1944 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1945 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1946
1947 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1948 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1949 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
1950
1951 // The purposes of the flags on the scratch registers is explained in
1952 // emitAtomicBinary. In summary, we need a scratch register which is going to
1953 // be undef, that is unique among registers chosen for the instruction.
1954
1955 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1957 .addReg(PtrCopy, RegState::Kill)
1958 .addReg(OldValCopy, RegState::Kill)
1959 .addReg(NewValCopy, RegState::Kill)
1962
1963 MI.eraseFromParent(); // The instruction is gone now.
1964
1965 return BB;
1966}
1967
/// Expand an ATOMIC_CMP_SWAP_I8/I16 pseudo instruction.
///
/// LL/SC operate on aligned words, so a partword compare-and-swap is done on
/// the containing word: this routine emits, in \p BB, the code that computes
/// the word-aligned address plus the shift amount and masks needed to isolate
/// the byte/halfword inside that word, then emits an
/// ATOMIC_CMP_SWAP_I{8,16}_POSTRA pseudo that is expanded into the actual
/// LL/SC retry loop after register allocation.
MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for EmitAtomicCmpSwapPartial.");

  MachineFunction *MF = BB->getParent();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  // Pointer computations need the pointer-sized register class.
  const TargetRegisterClass *RCp =
    getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register CmpVal = MI.getOperand(2).getReg();
  Register NewVal = MI.getOperand(3).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
  Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
  Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
                          ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;

  // The scratch registers here with the EarlyClobber | Define | Dead | Implicit
  // flags are used to coerce the register allocator and the machine verifier to
  // accept the usage of these registers.
  // The EarlyClobber flag has the semantic properties that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to coerce the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());


  // thisMBB:
  //    addiu   masklsb2,$0,-4                # 0xfffffffc
  //    and     alignedaddr,ptr,masklsb2
  //    andi    ptrlsb2,ptr,3
  //    xori    ptrlsb2,ptrlsb2,3              # Only for BE
  //    sll     shiftamt,ptrlsb2,3
  //    ori     maskupper,$0,255               # 0xff
  //    sll     mask,maskupper,shiftamt
  //    nor     mask2,$0,mask
  //    andi    maskedcmpval,cmpval,255
  //    sll     shiftedcmpval,maskedcmpval,shiftamt
  //    andi    maskednewval,newval,255
  //    sll     shiftednewval,maskednewval,shiftamt
  int64_t MaskImm = (Size == 1) ? 255 : 65535; // 0xff for i8, 0xffff for i16.
  BuildMI(*BB, II, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
    .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(*BB, II, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
    .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(*BB, II, DL, TII->get(Mips::ANDi), PtrLSB2)
    .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(*BB, II, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    // Big-endian: the sub-word sits at the opposite end of the word, so the
    // byte offset is flipped before it is turned into a bit shift.
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(*BB, II, DL, TII->get(Mips::XORi), Off)
      .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(*BB, II, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
  BuildMI(*BB, II, DL, TII->get(Mips::ORi), MaskUpper)
    .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(*BB, II, DL, TII->get(Mips::SLLV), Mask)
    .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(*BB, II, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(*BB, II, DL, TII->get(Mips::ANDi), MaskedCmpVal)
    .addReg(CmpVal).addImm(MaskImm);
  BuildMI(*BB, II, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
    .addReg(MaskedCmpVal).addReg(ShiftAmt);
  BuildMI(*BB, II, DL, TII->get(Mips::ANDi), MaskedNewVal)
    .addReg(NewVal).addImm(MaskImm);
  BuildMI(*BB, II, DL, TII->get(Mips::SLLV), ShiftedNewVal)
    .addReg(MaskedNewVal).addReg(ShiftAmt);

  // The purposes of the flags on the scratch registers are explained in
  // emitAtomicBinary. In summary, we need a scratch register which is going to
  // be undef, that is unique among the register chosen for the instruction.

  BuildMI(*BB, II, DL, TII->get(AtomicOp))
      .addReg(AlignedAddr)
      .addReg(Mask)
      .addReg(ShiftedCmpVal)
      .addReg(Mask2)
      .addReg(ShiftedNewVal)
      .addReg(ShiftAmt)

  MI.eraseFromParent(); // The instruction is gone now.

  return exitMBB;
}
2091
/// Lower ISD::BRCOND. Branches whose condition is not the result of a
/// floating-point comparison are returned unchanged; FP-conditioned branches
/// are rewritten into MipsISD::FPBrcond nodes that read the FCC0
/// condition-code register.
SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  // The first operand is the chain, the second is the condition, the third is
  // the block to branch to if the condition is true.
  SDValue Chain = Op.getOperand(0);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);

  SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));

  // Return if flag is not set by a floating point comparison.
  if (CondRes.getOpcode() != MipsISD::FPCmp)
    return Op;

  SDValue CCNode = CondRes.getOperand(2);
  // Branch opcode selected from the FP condition code (Opc).
  SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
  return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
                     FCC0, Dest, CondRes);
}
2114
// Lower ISD::SELECT. Selects whose condition does not come from a
// floating-point comparison are returned unchanged; FP-conditioned selects
// become a conditional-move-on-FP-condition (CMovFP) node.
SDValue MipsTargetLowering::
lowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Cond = createFPCmp(DAG, Op.getOperand(0));

  // Return if flag is not set by a floating point comparison.
  if (Cond.getOpcode() != MipsISD::FPCmp)
    return Op;

  // Select between the true value (operand 1) and the false value
  // (operand 2) on the FP condition.
  return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
                      SDLoc(Op));
}
2128
// Lower ISD::SETCC for floating-point comparisons: materialize i32 1/0 with
// a conditional move keyed on the FP condition code.
SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = createFPCmp(DAG, Op);

  assert(Cond.getOpcode() == MipsISD::FPCmp &&
         "Floating point operand expected.");

  SDLoc DL(Op);
  SDValue True = DAG.getConstant(1, DL, MVT::i32);
  SDValue False = DAG.getConstant(0, DL, MVT::i32);

  // 1 when the comparison holds, 0 otherwise.
  return createCMovFP(DAG, Cond, True, False, DL);
}
2142
/// Lower an ISD::GlobalAddress node.
///
/// Non-PIC: globals in the small data section are accessed %gp_rel,
/// everything else is materialized with %hi/%lo (or, for 64-bit symbols,
/// %highest/%higher/%hi/%lo). PIC: the address is always loaded from the
/// GOT — see the comment below for why this also covers local and hidden
/// symbols on MIPS.
SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();

  if (!isPositionIndependent()) {
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
    // Look through aliases so the small-section check sees the real object.
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    // %hi/%lo relocation
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                // %highest/%higher/%hi/%lo relocation
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  // Every other architecture would use shouldAssumeDSOLocal in here, but
  // mips is special.
  // * In PIC code mips requires got loads even for local statics!
  // * To save on got entries, for local statics the got entry contains the
  //   page and an additional add instruction takes care of the low bits.
  // * It is legal to access a hidden symbol with a non hidden undefined,
  //   so one cannot guarantee that all access to a hidden symbol will know
  //   it is hidden.
  // * Mips linkers don't support creating a page and a full got entry for
  //   the same symbol.
  // * Given all that, we have to use a full got entry for hidden symbols :-(
  if (GV->hasLocalLinkage())
    return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());

  // Large-GOT code needs the %got_hi/%got_lo access sequence.
  if (Subtarget.useXGOT())
    return getAddrGlobalLargeGOT(
        DAG.getEntryNode(),
        MachinePointerInfo::getGOT(DAG.getMachineFunction()));

  return getAddrGlobal(
      N, SDLoc(N), Ty, DAG,
      DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
2189
2190SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2191 SelectionDAG &DAG) const {
2192 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2193 EVT Ty = Op.getValueType();
2194
2195 if (!isPositionIndependent())
2196 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2197 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2198
2199 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2200}
2201
/// Lower an ISD::GlobalTLSAddress node.
///
/// PIC selects the General Dynamic / Local Dynamic models (a call to
/// __tls_get_addr); otherwise Initial Exec (a GOT load of the TP offset) or
/// Local Exec (a direct %tprel_hi/%tprel_lo offset) is used. Emulated TLS
/// is delegated to the target-independent lowering.
SDValue MipsTargetLowering::
lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // If the relocation model is PIC, use the General Dynamic TLS Model or
  // Local Dynamic TLS model, otherwise use the Initial Exec or
  // Local Exec TLS Model.

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());


  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // General Dynamic and Local Dynamic TLS Model.
    unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM

    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
                                   getGlobalReg(DAG, PtrVT), TGA);
    unsigned PtrSize = PtrVT.getSizeInBits();
    IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);

    // The module pointer (or TLS block address) is returned by a call to
    // __tls_get_addr with the GOT-relative TLS descriptor as argument.
    SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);

    ArgListEntry Entry;
    Entry.Node = Argument;
    Entry.Ty = PtrTy;
    Args.push_back(Entry);

    CLI.setDebugLoc(DL)
        .setChain(DAG.getEntryNode())
        .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

    SDValue Ret = CallResult.first;

    // General Dynamic: the call already returns the variable's address.
    if (model != TLSModel::LocalDynamic)
      return Ret;

    // Local Dynamic: add the module-relative %dtprel offset to the module
    // base returned by the call.
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
    return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
  }

  if (model == TLSModel::InitialExec) {
    // Initial Exec TLS Model: load the TP-relative offset from the GOT.
    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
                      TGA);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
  } else {
    // Local Exec TLS Model: the TP-relative offset is a link-time constant.
    assert(model == TLSModel::LocalExec);
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
  }

  // The variable lives at thread-pointer + offset.
  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
}
2283
2284SDValue MipsTargetLowering::
2285lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2286{
2287 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2288 EVT Ty = Op.getValueType();
2289
2290 if (!isPositionIndependent())
2291 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2292 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2293
2294 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2295}
2296
// Lower an ISD::ConstantPool node. Non-PIC: small-section constants are
// accessed %gp_rel, others via direct materialization; PIC: through the GOT.
SDValue MipsTargetLowering::
lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent()) {
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(

    if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
2319
// Lower ISD::VASTART: store the address of the function's varargs save
// area (the VarArgsFrameIndex slot) into the va_list object.
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
2334
/// Lower ISD::VAARG: load the current va_list pointer, realign it if the
/// argument requires more than the minimum slot alignment, bump the pointer
/// past the (slot-rounded) argument, store it back, and load the argument.
/// Big-endian targets additionally adjust into the correct half of the slot
/// for sub-slot-sized arguments.
SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  // Alignment requested by the va_arg node (operand 3); at least 1.
  const Align Align =
      llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  // Argument slots are 8 bytes on N32/N64 and 4 bytes on O32.
  unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;

  SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
                                   VAListPtr, MachinePointerInfo(SV));
  SDValue VAList = VAListLoad;

  // Re-align the pointer if necessary.
  // It should only ever be necessary for 64-bit types on O32 since the minimum
  // argument alignment is the same as the maximum type alignment for N32/N64.
  //
  // FIXME: We currently align too often. The code generator doesn't notice
  // when the pointer is still aligned from the last va_arg (or pair of
  // va_args for the i64 on O32 case).
    // Round the pointer up: (VAList + Align - 1) & -Align.
    VAList = DAG.getNode(
        ISD::ADD, DL, VAList.getValueType(), VAList,
        DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));

    VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
                         DAG.getSignedConstant(-(int64_t)Align.value(), DL,
                                               VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg.
  auto &TD = DAG.getDataLayout();
  unsigned ArgSizeInBytes =
  SDValue Tmp3 =
      DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
                  DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
                                  DL, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
                       MachinePointerInfo(SV));

  // In big-endian mode we must adjust the pointer when the load size is smaller
  // than the argument slot size. We must also reduce the known alignment to
  // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
  // the correct half of the slot, and reduce the alignment from 8 (slot
  // alignment) down to 4 (type alignment).
  if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
    unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
    VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
                         DAG.getIntPtrConstant(Adjustment, DL));
  }
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
}
2392
                                bool HasExtractInsert) {
  // Lower FCOPYSIGN when the FPU works on 32-bit pieces: copy the sign bit
  // of Y into X by operating on the i32 holding the sign (the upper word
  // for f64), using ext/ins when available and a shift sequence otherwise.
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
  SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
  SDValue Res;

  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  // to i32.
  SDValue X = (TyX == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                Const1);
  SDValue Y = (TyY == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
                Const1);

  if (HasExtractInsert) {
    // ext  E, Y, 31, 1  ; extract bit31 of Y
    // ins  X, E, 31, 1  ; insert extracted bit at bit31 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
  } else {
    // sll SllX, X, 1    ; shift out X's sign bit...
    // srl SrlX, SllX, 1 ; ...and shift back in a zero
    // srl SrlY, Y, 31   ; isolate Y's sign bit
    // sll SllY, SrlX, 31
    // or  Or, SrlX, SllY
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
    SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
    SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
  }

  if (TyX == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);

  // f64 result: rebuild the pair from the untouched low word and the word
  // carrying the new sign.
  SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                             Op.getOperand(0),
                             DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
2439
                                bool HasExtractInsert) {
  // Lower FCOPYSIGN with 64-bit GPRs: bitcast both operands to integers of
  // their own width, move Y's sign bit into X's sign-bit position (widening
  // or truncating as needed when the widths differ), and bitcast back.
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);

  // Bitcast to integer nodes.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
  SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));

  if (HasExtractInsert) {
    // ext  E, Y, width(Y) - 1, 1  ; extract bit width(Y)-1 of Y
    // ins  X, E, width(X) - 1, 1  ; insert extracted bit at bit width(X)-1 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
                            DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);

    if (WidthX > WidthY)
      E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
    else if (WidthY > WidthX)
      E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);

    SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
                            DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
                            X);
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
  }

  // (d)sll SllX, X, 1           ; clear X's sign bit
  // (d)srl SrlX, SllX, 1
  // (d)srl SrlY, Y, width(Y)-1  ; isolate Y's sign bit
  // (d)sll SllY, SrlX, width(Y)-1
  // or     Or, SrlX, SllY
  SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
  SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
  SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
                             DAG.getConstant(WidthY - 1, DL, MVT::i32));

  if (WidthX > WidthY)
    SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
  else if (WidthY > WidthX)
    SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);

  SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
                             DAG.getConstant(WidthX - 1, DL, MVT::i32));
  SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
  return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
}
2489
// Lower ISD::FCOPYSIGN, dispatching on whether the target has 64-bit GPRs.
SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget.isGP64bit())

}
2497
// Lower FABS when the FPU works on 32-bit pieces: clear the sign bit of the
// i32 that holds it (the upper word for f64), with ins when available and a
// shl/srl pair otherwise.
SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

    // Native abs instruction handles the operand directly.
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  // to i32.
  SDValue X = (Op.getValueType() == MVT::f32)
                  ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
                  : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                                Op.getOperand(0), Const1);

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
                      DAG.getRegister(Mips::ZERO, MVT::i32),
                      DAG.getConstant(31, DL, MVT::i32), Const1, X);
  else {
    // TODO: Provide DAG patterns which transform (and x, cst)
    // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
  }

  if (Op.getValueType() == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);

  // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
  // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
  // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
  // place.
  SDValue LowX =
      DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                  DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
2537
// Lower f64 FABS with 64-bit GPRs: bitcast to i64 and clear bit 63, with
// dins when available and a shl/srl pair otherwise.
SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

    // Native abs instruction handles the operand directly.
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // Bitcast to integer node.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
                      DAG.getRegister(Mips::ZERO_64, MVT::i64),
                      DAG.getConstant(63, DL, MVT::i32), Const1, X);
  else {
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
  }

  return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
}
2561
2562SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2563 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2564 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2565
2566 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2567}
2568
2569SDValue MipsTargetLowering::lowerFCANONICALIZE(SDValue Op,
2570 SelectionDAG &DAG) const {
2571 SDLoc DL(Op);
2572 EVT VT = Op.getValueType();
2573 SDValue Operand = Op.getOperand(0);
2574 SDNodeFlags Flags = Op->getFlags();
2575
2576 if (Flags.hasNoNaNs() || DAG.isKnownNeverNaN(Operand))
2577 return Operand;
2578
2579 SDValue Quiet = DAG.getNode(ISD::FADD, DL, VT, Operand, Operand);
2580 return DAG.getSelectCC(DL, Operand, Operand, Quiet, Operand, ISD::SETUO);
2581}
2582
// Lower ISD::FRAMEADDR: only depth 0 (the current frame) is supported;
// the frame pointer register is returned directly.
SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  // check the depth
  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    return SDValue();
  }

  MFI.setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(
      DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
  return FrameAddr;
}
2600
// Lower ISD::RETURNADDR: only depth 0 is supported; the value is read from
// the RA register, which is marked as a live-in of the function.
SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
    return SDValue();

  // check the depth
  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    return SDValue();
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  MFI.setReturnAddressIsTaken(true);

  // Return RA, which contains the return address. Mark it an implicit live-in.
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}
2623
// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
// generated from __builtin_eh_return (offset, handler)
// The effect of this is to adjust the stack pointer by "offset"
// and then branch to "handler".
SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
                                                                     const {

  // Record that this function performs an EH return so the prologue/epilogue
  // code can be set up accordingly.
  MipsFI->setCallsEhReturn();
  SDValue Chain     = Op.getOperand(0);
  SDValue Offset    = Op.getOperand(1);
  SDValue Handler   = Op.getOperand(2);
  SDLoc DL(Op);
  EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;

  // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
  // EH_RETURN nodes, so that instructions are emitted back-to-back.
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
  Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
  Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
  return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
                     DAG.getRegister(OffsetReg, Ty),
                     DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
                     Chain.getValue(1));
}
2651
2652SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2653 SelectionDAG &DAG) const {
2654 // FIXME: Need pseudo-fence for 'singlethread' fences
2655 // FIXME: Set SType for weaker fences where supported/appropriate.
2656 unsigned SType = 0;
2657 SDLoc DL(Op);
2658 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2659 DAG.getConstant(SType, DL, MVT::i32));
2660}
2661
2662SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2663 SelectionDAG &DAG) const {
2664 SDLoc DL(Op);
2665 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2666
2667 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2668 SDValue Shamt = Op.getOperand(2);
2669 // if shamt < (VT.bits):
2670 // lo = (shl lo, shamt)
2671 // hi = (or (shl hi, shamt) (srl (srl lo, 1), (xor shamt, (VT.bits-1))))
2672 // else:
2673 // lo = 0
2674 // hi = (shl lo, shamt[4:0])
2675 SDValue Not =
2676 DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2677 DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
2678 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2679 DAG.getConstant(1, DL, VT));
2680 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2681 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2682 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2683 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2684 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2685 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2686 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2687 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2688 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2689
2690 SDValue Ops[2] = {Lo, Hi};
2691 return DAG.getMergeValues(Ops, DL);
2692}
2693
// Lower ISD::SRL_PARTS / ISD::SRA_PARTS: a 2*bits-wide right shift of the
// (Lo, Hi) register pair; \p IsSRA selects arithmetic vs. logical.
SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                 bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;

  // if shamt < (VT.bits):
  //  lo = (or (shl (shl hi, 1), (xor shamt, (VT.bits-1))) (srl lo, shamt))
  //  if isSRA:
  //    hi = (sra hi, shamt)
  //  else:
  //    hi = (srl hi, shamt)
  // else:
  //  if isSRA:
  //   lo = (sra hi, shamt[4:0])
  //   hi = (sra hi, 31)
  //  else:
  //   lo = (srl hi, shamt[4:0])
  //   hi = 0
  SDValue Not =
      DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
                  DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
  SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
                                     DAG.getConstant(1, DL, VT));
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
                                     DL, VT, Hi, Shamt);
  // Bit 'bits' of the shift amount distinguishes small from large shifts.
  SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
                             DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
  // Sign-replication of Hi, used as the high word of large arithmetic shifts.
  SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
                            DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));

  if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
    // Targets without conditional moves use a pseudo double-select.
    SDVTList VTList = DAG.getVTList(VT, VT);
                       DL, VTList, Cond, ShiftRightHi,
                       IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
                       ShiftRightHi);
  }

  Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
                   IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);

  SDValue Ops[2] = {Lo, Hi};
  return DAG.getMergeValues(Ops, DL);
}
2745
2746static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2747 SDValue Chain, SDValue Src, unsigned Offset) {
2748 SDValue Ptr = LD->getBasePtr();
2749 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2750 EVT BasePtrVT = Ptr.getValueType();
2751 SDLoc DL(LD);
2752 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2753
2754 if (Offset)
2755 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2756 DAG.getConstant(Offset, DL, BasePtrVT));
2757
2758 SDValue Ops[] = { Chain, Ptr, Src };
2759 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2760 LD->getMemOperand());
2761}
2762
// Expand an unaligned 32 or 64-bit integer load node.
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  EVT MemVT = LD->getMemoryVT();

    // Targets with hardware unaligned-access support need no expansion.
    return Op;

  // Return if load is aligned or if MemVT is neither i32 nor i64.
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
    return SDValue();

  bool IsLittle = Subtarget.isLittle();
  EVT VT = Op.getValueType();
  ISD::LoadExtType ExtType = LD->getExtensionType();
  SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);

  assert((VT == MVT::i32) || (VT == MVT::i64));

  // Expand
  //  (set dst, (i64 (load baseptr)))
  // to
  //  (set tmp, (ldl (add baseptr, 7), undef))
  //  (set dst, (ldr baseptr, tmp))
  if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
    SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
                               IsLittle ? 7 : 0);
    return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
                        IsLittle ? 0 : 7);
  }

  // Note the LWL/LWR offsets swap between little- and big-endian.
  SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
                             IsLittle ? 3 : 0);
  SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
                             IsLittle ? 0 : 3);

  // Expand
  //  (set dst, (i32 (load baseptr))) or
  //  (set dst, (i64 (sextload baseptr))) or
  //  (set dst, (i64 (extload baseptr)))
  // to
  //  (set tmp, (lwl (add baseptr, 3), undef))
  //  (set dst, (lwr baseptr, tmp))
  if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
      (ExtType == ISD::EXTLOAD))
    return LWR;

  assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));

  // Expand
  //  (set dst, (i64 (zextload baseptr)))
  // to
  //  (set tmp0, (lwl (add baseptr, 3), undef))
  //  (set tmp1, (lwr baseptr, tmp0))
  //  (set tmp2, (shl tmp1, 32))
  //  (set dst, (srl tmp2, 32))
  SDLoc DL(LD);
  SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
  SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
  SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
  SDValue Ops[] = { SRL, LWR.getValue(1) };
  return DAG.getMergeValues(Ops, DL);
}
2827
2828static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2829 SDValue Chain, unsigned Offset) {
2830 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2831 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2832 SDLoc DL(SD);
2833 SDVTList VTList = DAG.getVTList(MVT::Other);
2834
2835 if (Offset)
2836 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2837 DAG.getConstant(Offset, DL, BasePtrVT));
2838
2839 SDValue Ops[] = { Chain, Value, Ptr };
2840 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2841 SD->getMemOperand());
2842}
2843
// Expand an unaligned 32 or 64-bit integer store node.
                                      bool IsLittle) {
  SDValue Value = SD->getValue(), Chain = SD->getChain();
  EVT VT = Value.getValueType();

  // Expand
  //  (store val, baseptr) or
  //  (truncstore val, baseptr)
  // to
  //  (swl val, (add baseptr, 3))
  //  (swr val, baseptr)
  // Note the SWL/SWR offsets swap between little- and big-endian.
  if ((VT == MVT::i32) || SD->isTruncatingStore()) {
    SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
                                IsLittle ? 3 : 0);
    return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
  }

  assert(VT == MVT::i64);

  // Expand
  //  (store val, baseptr)
  // to
  //  (sdl val, (add baseptr, 7))
  //  (sdr val, baseptr)
  SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
  return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
}
2872
// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
// Returns SDValue() (leave the node alone) when the stored value is not an
// fp_to_sint, or when a single-float FPU cannot produce the >32-bit result.
                                     bool SingleFloat) {
  SDValue Val = SD->getValue();

  if (Val.getOpcode() != ISD::FP_TO_SINT ||
      (Val.getValueSizeInBits() > 32 && SingleFloat))
    return SDValue();

  // Convert in the FPU and store the resulting bits directly, reusing the
  // original store's pointer info, alignment, and memory-operand flags.
  SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
                           Val.getOperand(0));
  return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
                      SD->getPointerInfo(), SD->getAlign(),
                      SD->getMemOperand()->getFlags());
}
2889
  StoreSDNode *SD = cast<StoreSDNode>(Op);
  EVT MemVT = SD->getMemoryVT();

  // Lower unaligned integer stores.
  // An i32/i64 store whose alignment is below the natural alignment of the
  // memory type is expanded into a left/right store pair.
      (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
    return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());

}
2902
SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
                                              SelectionDAG &DAG) const {

  // Return a fixed StackObject with offset 0 which points to the old stack
  // pointer, i.e. the DWARF canonical frame address (CFA) for this frame.
  EVT ValTy = Op->getValueType(0);
  // Size the fixed object to the value's store size (bits -> bytes).
  int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
  return DAG.getFrameIndex(FI, ValTy);
}
2913
2914SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2915 SelectionDAG &DAG) const {
2916 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2917 return SDValue();
2918
2919 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2920 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2921 Op.getOperand(0));
2922 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2923}
2924
2925//===----------------------------------------------------------------------===//
2926// Calling Convention Implementation
2927//===----------------------------------------------------------------------===//
2928
2929//===----------------------------------------------------------------------===//
2930// TODO: Implement a generic logic using tblgen that can support this.
2931// Mips O32 ABI rules:
2932// ---
2933// i32 - Passed in A0, A1, A2, A3 and stack
2934// f32 - Only passed in f32 registers if no int reg has been used yet to hold
2935// an argument. Otherwise, passed in A1, A2, A3 and stack.
2936// f64 - Only passed in two aliased f32 registers if no int reg has been used
2937// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2938// not used, it must be shadowed. If only A3 is available, shadow it and
2939// go to stack.
2940// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2941// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2942// with the remainder spilled to the stack.
2943// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2944// spilling the remainder to the stack.
2945//
2946// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2947//===----------------------------------------------------------------------===//
2948
// Assign one O32 argument to a register or stack slot.  Returns true for
// byval arguments (left to the byval-specific handling); for everything
// else it records a CCValAssign in State and returns false.  F64Regs is
// the FPU register set for f64 (D6/D7 for FP32, D12_64/D14_64 for FP64).
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                       CCState &State, ArrayRef<MCPhysReg> F64Regs) {
  const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(

  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };

  // Downcast to MipsCCState for WasOriginalArgVectorFloat() below.
  const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);

  static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };

  // Even-numbered A-registers used for scalarized float-vector pieces.
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };

  // Do not process byval args here.
  if (ArgFlags.isByVal())
    return true;

  // Big-endian in-register args: promote i8/i16/i32 and mark the value as
  // living in the upper bits of the 32-bit register.
  if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
    if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
      LocVT = MVT::i32;
      if (ArgFlags.isSExt())
        LocInfo = CCValAssign::SExtUpper;
      else if (ArgFlags.isZExt())
        LocInfo = CCValAssign::ZExtUpper;
      else
        LocInfo = CCValAssign::AExtUpper;
    }
  }

  // Promote i8 and i16 to i32.
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  unsigned Reg;

  // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
  // is true: function is vararg, argument is 3rd or higher, there is previous
  // argument which is not f32 or f64.
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
                                State.getFirstUnallocated(F32Regs) != ValNo;
  Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
  // An i32 with 8-byte original alignment is half of a split i64.
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
  bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);

  // The MIPS vector ABI for floats passes them in a pair of registers
  if (ValVT == MVT::i32 && isVectorFloat) {
    // This is the start of a vector that was scalarized into an unknown number
    // of components. It doesn't matter how many there are. Allocate one of the
    // notional 8 byte aligned registers which map onto the argument stack, and
    // shadow the register lost to alignment requirements.
    if (ArgFlags.isSplit()) {
      Reg = State.AllocateReg(FloatVectorIntRegs);
      if (Reg == Mips::A2)
        State.AllocateReg(Mips::A1);
      else if (Reg == 0)
        State.AllocateReg(Mips::A3);
    } else {
      // If we're an intermediate component of the split, we can just attempt to
      // allocate a register directly.
      Reg = State.AllocateReg(IntRegs);
    }
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    Reg = State.AllocateReg(IntRegs);
    // If this is the first part of an i64 arg,
    // the allocated register must be either A0 or A2.
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
      Reg = State.AllocateReg(IntRegs);
    LocVT = MVT::i32;
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    // Allocate int register and shadow next int register. If first
    // available register is Mips::A1 or Mips::A3, shadow it too.
    Reg = State.AllocateReg(IntRegs);
    if (Reg == Mips::A1 || Reg == Mips::A3)
      Reg = State.AllocateReg(IntRegs);

    if (Reg) {
      LocVT = MVT::i32;

      // An f64 in integer registers occupies two custom i32 locations
      // (lo and hi halves); both are recorded here.
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      MCRegister HiReg = State.AllocateReg(IntRegs);
      assert(HiReg);
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
      return false;
    }
  } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
    // we are guaranteed to find an available float register
    if (ValVT == MVT::f32) {
      Reg = State.AllocateReg(F32Regs);
      // Shadow int register
      State.AllocateReg(IntRegs);
    } else {
      Reg = State.AllocateReg(F64Regs);
      // Shadow int registers
      MCRegister Reg2 = State.AllocateReg(IntRegs);
      if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
        State.AllocateReg(IntRegs);
      State.AllocateReg(IntRegs);
    }
  } else
    llvm_unreachable("Cannot handle this ValVT.");

  // No register left: spill the argument to the stack.
  if (!Reg) {
    unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  } else
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));

  return false;
}
3070
3071static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
3072 MVT LocVT, CCValAssign::LocInfo LocInfo,
3073 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3074 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
3075
3076 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3077}
3078
3079static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
3080 MVT LocVT, CCValAssign::LocInfo LocInfo,
3081 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3082 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3083
3084 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3085}
3086
3087static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3088 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3090
3091#include "MipsGenCallingConv.inc"
3092
  // All fixed (non-variadic) arguments use the tblgen-generated
  // CC_Mips_FixedArg table.
  return CC_Mips_FixedArg;
}

  // Return values are assigned via the tblgen-generated RetCC_Mips table.
  return RetCC_Mips;
}
3100//===----------------------------------------------------------------------===//
3101// Call Calling Convention Implementation
3102//===----------------------------------------------------------------------===//
3103
// Store one outgoing argument to its stack slot.
// For a normal call the slot is addressed off the live stack pointer; for a
// tail call the value is written into a fixed frame object at `Offset` so it
// lands in this function's own incoming-argument area.
SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
                                           SDValue Chain, SDValue Arg,
                                           const SDLoc &DL, bool IsTailCall,
                                           SelectionDAG &DAG) const {
  if (!IsTailCall) {
    SDValue PtrOff =
        DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
    return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
  }

  // Tail call: size the fixed object to the argument's store size.
  int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
  SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
  return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
}
3121
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
    bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
    SDValue Chain) const {
  // Insert node "GP copy globalreg" before call to function.
  //
  // R_MIPS_CALL* operators (emitted when non-internal functions are called
  // in PIC mode) allow symbols to be resolved via lazy binding.
  // The lazy binding stub requires GP to point to the GOT.
  // Note that we don't need GP to point to the GOT for indirect calls
  // (when R_MIPS_CALL* is not used for the call) because Mips linker generates
  // lazy binding stub for a function only when R_MIPS_CALL* are the only relocs
  // used for the function (that is, Mips linker doesn't generate lazy binding
  // stub for a function whose address is taken in the program).
  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;

  for (auto &R : RegsToPass) {
    Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &R : RegsToPass)
    Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask =
      TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  // Callees carrying the "__Mips16RetHelper" attribute get special mask
  // handling here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
    StringRef Sym = G->getGlobal()->getName();
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
    }
  }
  }
  Ops.push_back(CLI.DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);
}
3179
                                                       SDNode *Node) const {
  // Attach the callee's symbol to JALR-style call/tail-call instructions so
  // the asm printer can emit a `.reloc R_MIPS_JALR` annotation for it.
  switch (MI.getOpcode()) {
  default:
    return;
  case Mips::JALR:
  case Mips::JALRPseudo:
  case Mips::JALR64:
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
    // Bail out unless the relocation was requested and the node shape is
    // the one LowerCall() produces.
    if (!EmitJalrReloc ||
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
      return;
    }
    // We are after the callee address, set by LowerCall().
    // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
    // symbol.
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
    StringRef Sym;
    if (const GlobalAddressSDNode *G =
            dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
      // We must not emit the R_MIPS_JALR relocation against data symbols
      // since this will cause run-time crashes if the linker replaces the
      // call instruction with a relative branch to the data symbol.
      if (!isa<Function>(G->getGlobal())) {
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
        return;
      }
      Sym = G->getGlobal()->getName();
    }
    else if (const ExternalSymbolSDNode *ES =
                 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
      Sym = ES->getSymbol();
    }

    // Indirect calls through unnamed targets carry no symbol to relocate.
    if (Sym.empty())
      return;

    MachineFunction *MF = MI.getParent()->getParent();
    LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
  }
  }
}
3236
/// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// Also materializes the callee address (static, PIC/GOT, XGOT, long-call)
/// and dispatches eligible calls as tail calls.
SDValue
MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
  bool IsPIC = isPositionIndependent();

  // Analyze operands of the call, assigning locations to each operand.
  MipsCCState CCInfo(
      CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),

  const ExternalSymbolSDNode *ES =
      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());

  // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
  // is during the lowering of a call with a byval argument which produces
  // a call to memcpy. For the O32 case, this causes the caller to allocate
  // stack space for the reserved argument area for the callee, then recursively
  // again for the memcpy call. In the NEWABI case, this doesn't occur as those
  // ABIs mandate that the callee allocates the reserved argument area. We do
  // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
  //
  // If the callee has a byval argument and memcpy is used, we are mandated
  // to already have produced a reserved argument area for the callee for O32.
  // Therefore, the reserved argument area can be reused for both calls.
  //
  // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
  // present, as we have yet to hook that node onto the chain.
  //
  // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
  // case. GCC does a similar trick, in that wherever possible, it calculates
  // the maximum out going argument area (including the reserved area), and
  // preallocates the stack space on entrance to the caller.
  //
  // FIXME: We should do the same for efficiency and space.

  // Note: The check on the calling convention below must match
  // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
  bool MemcpyInByVal = ES && StringRef(ES->getSymbol()) == "memcpy" &&
                       CallConv != CallingConv::Fast &&
                       Chain.getOpcode() == ISD::CALLSEQ_START;

  // Allocate the reserved argument area. It seems strange to do this from the
  // caller side but removing it breaks the frame size calculation.
  unsigned ReservedArgArea =
      MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
  CCInfo.AllocateStack(ReservedArgArea, Align(1));

  CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
                             ES ? ES->getSymbol() : nullptr);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned StackSize = CCInfo.getStackSize();

  // Call site info for function parameters tracking.

  // Check if it's really possible to do a tail call. Restrict it to functions
  // that are part of this compilation unit.
  bool InternalLinkage = false;
  if (IsTailCall) {
    IsTailCall = isEligibleForTailCallOptimization(
        CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
    }
  }
  if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  if (IsTailCall)
    ++NumTailCalls;

  // Chain is the output chain of the last Load/Store or CopyToReg node.
  // ByValChain is the output chain of the last Memcpy node created for copying
  // byval arguments to the stack.
  unsigned StackAlignment = TFL->getStackAlignment();
  StackSize = alignTo(StackSize, StackAlignment);

  if (!(IsTailCall || MemcpyInByVal))
    Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);

      DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,

  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  CCInfo.rewindByValRegsInfo();

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    SDValue Arg = OutVals[OutIdx];
    CCValAssign &VA = ArgLocs[i];
    MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
    ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
    bool UseUpperBits = false;

    // ByVal Arg.
    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);

      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      assert(!IsTailCall &&
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
                   FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
                   VA);
      CCInfo.nextInRegsParam();
      continue;
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      if (VA.isRegLoc()) {
        if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
            (ValVT == MVT::f64 && LocVT == MVT::i64) ||
            (ValVT == MVT::i64 && LocVT == MVT::f64))
          Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
        else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
          // An f64 passed in a pair of i32 registers: extract the two
          // halves, swapping them on big-endian targets.
                                     Arg, DAG.getConstant(0, DL, MVT::i32));
                                     Arg, DAG.getConstant(1, DL, MVT::i32));
          if (!Subtarget.isLittle())
            std::swap(Lo, Hi);

          assert(VA.needsCustom());

          // The second half's register comes from the following ArgLocs
          // entry (allocated as a custom pair by CC_MipsO32).
          Register LocRegLo = VA.getLocReg();
          Register LocRegHigh = ArgLocs[++i].getLocReg();
          RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
          RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
          continue;
        }
      }
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
      break;
    }

    // Values assigned to the upper bits of a register are shifted into
    // place after the extension above.
    if (UseUpperBits) {
      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
      unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
      Arg = DAG.getNode(
          ISD::SHL, DL, VA.getLocVT(), Arg,
          DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

      // If the parameter is passed through reg $D, which splits into
      // two physical registers, avoid creating call site info.
      if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
        continue;

      // Collect CSInfo about which register passes which parameter.
      const TargetOptions &Options = DAG.getTarget().Options;
      if (Options.EmitCallSiteInfo)
        CSInfo.ArgRegPairs.emplace_back(VA.getLocReg(), i);

      continue;
    }

    // Register can't get to this point...
    assert(VA.isMemLoc());

    // emit ISD::STORE which stores the
    // parameter value to a stack Location
    MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
                                         Chain, Arg, DL, IsTailCall, DAG));
  }

  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.

  EVT Ty = Callee.getValueType();
  bool GlobalOrExternal = false, IsCallReloc = false;

  // The long-calls feature is ignored in case of PIC.
  // While we do not support -mshared / -mno-shared properly,
  // ignore long-calls in case of -mabicalls too.
  if (!Subtarget.isABICalls() && !IsPIC) {
    // If the function should be called using "long call",
    // get its address into a register to prevent using
    // of the `jal` instruction for the direct call.
    if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
      if (Subtarget.useLongCalls())
            ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
            : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
    } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
      bool UseLongCalls = Subtarget.useLongCalls();
      // If the function has long-call/far/near attribute
      // it overrides command line switch passed to the backend.
      if (auto *F = dyn_cast<Function>(N->getGlobal())) {
        if (F->hasFnAttribute("long-call"))
          UseLongCalls = true;
        else if (F->hasFnAttribute("short-call"))
          UseLongCalls = false;
      }
      if (UseLongCalls)
            ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
            : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
    }
  }

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (IsPIC) {
      const GlobalValue *Val = G->getGlobal();
      InternalLinkage = Val->hasInternalLinkage();

      // Internal-linkage callees are reached relative to $gp; others go
      // through the GOT (large-GOT when XGOT is enabled).
      if (InternalLinkage)
        Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
      else if (Subtarget.useXGOT()) {
                                       MipsII::MO_CALL_LO16, Chain,
                                       FuncInfo->callPtrInfo(MF, Val));
        IsCallReloc = true;
      } else {
        Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
                               FuncInfo->callPtrInfo(MF, Val));
        IsCallReloc = true;
      }
    } else
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
                                          getPointerTy(DAG.getDataLayout()), 0,
    GlobalOrExternal = true;
  }
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();

    if (!IsPIC) // static
    else if (Subtarget.useXGOT()) {
                                       MipsII::MO_CALL_LO16, Chain,
                                       FuncInfo->callPtrInfo(MF, Sym));
      IsCallReloc = true;
    } else { // PIC
      Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
                             FuncInfo->callPtrInfo(MF, Sym));
      IsCallReloc = true;
    }

    GlobalOrExternal = true;
  }

  SmallVector<SDValue, 8> Ops(1, Chain);
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // Append registers, register mask, and glue to the operand list.
  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);

  if (IsTailCall) {
    SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
  SDValue InGlue = Chain.getValue(1);

  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  // Create the CALLSEQ_END node in the case of where it is not a call to
  // memcpy.
  if (!(MemcpyInByVal)) {
    Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
    InGlue = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
                         InVals, CLI);
}
3576
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue MipsTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
  // Assign locations to each value returned by this call.
  MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                     *DAG.getContext());

  const ExternalSymbolSDNode *ES =
      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
                           ES ? ES->getSymbol() : nullptr);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
                                     RVLocs[i].getLocVT(), InGlue);
    Chain = Val.getValue(1);
    InGlue = Val.getValue(2);

    // Results living in the upper bits of the register are shifted down
    // to the low bits first.
    if (VA.isUpperBitsInLoc()) {
      unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
      unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
      unsigned Shift =
      Val = DAG.getNode(
          Shift, DL, VA.getLocVT(), Val,
          DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
    }

    // Narrow/convert the value back to its IR type, asserting the
    // extension kind so later DAG combines can rely on it.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
3645
                                       EVT ArgVT, const SDLoc &DL,
                                       SelectionDAG &DAG) {
  MVT LocVT = VA.getLocVT();  // type the value occupies in its slot/register
  EVT ValVT = VA.getValVT();  // the value's original type

  // Shift into the upper bits if necessary.
  switch (VA.getLocInfo()) {
  default:
    break;
    unsigned ValSizeInBits = ArgVT.getSizeInBits();
    unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
    unsigned Opcode =
    Val = DAG.getNode(
        Opcode, DL, VA.getLocVT(), Val,
        DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
    break;
  }
  }

  // If this is a value smaller than the argument slot size (32-bit for O32,
  // 64-bit for N32/N64), it has been promoted in some way to the argument slot
  // size. Extract the value and insert any appropriate assertions regarding
  // sign/zero extension.
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unknown loc info!");
  case CCValAssign::Full:
    break;
  case CCValAssign::AExt:
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
    break;
  case CCValAssign::SExt:
    Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
    break;
  case CCValAssign::ZExt:
    Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
    break;
  case CCValAssign::BCvt:
    Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
    break;
  }

  return Val;
}
3700
3701//===----------------------------------------------------------------------===//
3702// Formal Arguments Calling Convention Implementation
3703//===----------------------------------------------------------------------===//
3704/// LowerFormalArguments - transform physical registers into virtual registers
3705/// and generate load operations for arguments places on the stack.
SDValue MipsTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Reset before analysis; writeVarArgRegs() fills in the real index for
  // vararg functions below.
  MipsFI->setVarArgsFrameIndex(0);

  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                     *DAG.getContext());
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
  Function::const_arg_iterator FuncArg = Func.arg_begin();

  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");

  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
  MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
                           CCInfo.getInRegsParamsCount() > 0);

  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();

  // First pass: materialize each incoming argument (register copy, byval
  // frame object, or stack load) into InVals.
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[InsIdx].isOrigArg()) {
      // Keep FuncArg in sync with the original IR argument this location
      // belongs to (several locations may map to one IR argument).
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    }
    EVT ValVT = VA.getValVT();
    ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
    bool IsRegLoc = VA.isRegLoc();

    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);

      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();
      continue;
    }

    // Arguments stored in registers.
    if (IsRegLoc) {
      MVT RegVT = VA.getLocVT();
      Register ArgReg = VA.getLocReg();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);

      // Transform the arguments stored on
      // physical registers into virtual ones.
      unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);

      ArgValue =
          UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);

      // Handle floating point arguments passed in integer registers and
      // long double arguments passed in floating point registers.
      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
        ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
      else if (ABI.IsO32() && RegVT == MVT::i32 &&
               ValVT == MVT::f64) {
        // O32 splits an f64 across two consecutive i32 registers; fuse the
        // pair back into one f64 value (swapping halves on big-endian).
        assert(VA.needsCustom() && "Expected custom argument for f64 split");
        CCValAssign &NextVA = ArgLocs[++i];
        unsigned Reg2 =
            addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
        SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
        if (!Subtarget.isLittle())
          std::swap(ArgValue, ArgValue2);
        ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
                               ArgValue, ArgValue2);
      }

      InVals.push_back(ArgValue);
    } else { // VA.isRegLoc()
      MVT LocVT = VA.getLocVT();

      assert(!VA.needsCustom() && "unexpected custom memory argument");

      // Only arguments passed on the stack should make it here.
      assert(VA.isMemLoc());

      // The stack pointer offset is relative to the caller stack frame.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue ArgValue = DAG.getLoad(
          LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));

      ArgValue =
          UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);

      InVals.push_back(ArgValue);
    }
  }

  // Second pass: locate an sret argument, if any, and stash it in a virtual
  // register for LowerReturn to copy into $v0.
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {

    // Skip the second half of custom (split) locations.
    if (ArgLocs[i].needsCustom()) {
      ++i;
      continue;
    }

    // The mips ABIs for returning structs by value require that we copy
    // the sret argument into $v0 for the return. Save the argument into
    // a virtual register so that we can access it from the return points.
    if (Ins[InsIdx].Flags.isSRet()) {
      unsigned Reg = MipsFI->getSRetReturnReg();
      if (!Reg) {
                 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
        MipsFI->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
      break; // At most one sret argument exists.
    }
  }

  if (IsVarArg)
    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens on vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}
3858
3859//===----------------------------------------------------------------------===//
3860// Return Value Calling Convention Implementation
3861//===----------------------------------------------------------------------===//
3862
/// Return true if every value in Outs can be returned in registers under
/// RetCC_Mips (no demotion to sret needed).
bool
MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                   MachineFunction &MF, bool IsVarArg,
                                   LLVMContext &Context) const {
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
}
3872
3873bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
3874 bool IsSigned) const {
3875 if ((ABI.IsN32() || ABI.IsN64()) && Ty->isIntegerTy(32))
3876 return true;
3877
3878 return IsSigned;
3879}
3880
/// Emit the return sequence for an interrupt handler: mark the function as
/// an ISR and lower the return to the MipsISD::ERet ("eret") node.
SDValue
MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                         const SDLoc &DL,
                                         SelectionDAG &DAG) const {

  // Record that this function is an interrupt service routine.
  MipsFI->setISR();

  return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
}
3892
/// Lower an IR return: extend/bitcast each return value per its assigned
/// location, copy it into the designated physical register, and emit a
/// MipsISD::Ret (or ERet for interrupt handlers) glued to the copies.
SDValue
MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                bool IsVarArg,
                                const SmallVectorImpl<SDValue> &OutVals,
                                const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of
  // the return value to a location.

  // CCState - Info about the registers and stack slot.
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // Set when the value must be shifted into the upper bits of the
    // location register (the *Upper location kinds).
    bool UseUpperBits = false;

    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
      break;
    }

    if (UseUpperBits) {
      // Shift the value into the high bits of the location register.
      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
      unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
      Val = DAG.getNode(
          ISD::SHL, DL, VA.getLocVT(), Val,
          DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // The mips ABIs for returning structs by value require that we copy
  // the sret argument into $v0 for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into $v0.
  if (MF.getFunction().hasStructRetAttr()) {
    unsigned Reg = MipsFI->getSRetReturnReg();

    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    SDValue Val =
        DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

    Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  // ISRs must use "eret".
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
    return LowerInterruptReturn(RetOps, DL, DAG);

  // Standard return on Mips is a "jr $ra".
  return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
}
3995
3996//===----------------------------------------------------------------------===//
3997// Mips Inline Assembly Support
3998//===----------------------------------------------------------------------===//
3999
4000/// getConstraintType - Given a constraint letter, return the type of
4001/// constraint it is for this target.
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  // Mips specific constraints
  // GCC config/mips/constraints.md
  //
  // 'd' : An address register. Equivalent to r
  //       unless generating MIPS16 code.
  // 'y' : Equivalent to r; retained for
  //       backwards compatibility.
  // 'c' : A register suitable for use in an indirect
  //       jump. This will always be $25 for -mabicalls.
  // 'l' : The lo register. 1 word storage.
  // 'x' : The hilo register pair. Double word storage.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      default : break;
      case 'd':
      case 'y':
      case 'f':
      case 'c':
      case 'l':
      case 'x':
        return C_RegisterClass;
      case 'R':
        return C_Memory;
    }
  }

  // 'ZC': a memory operand whose address is formed from a base register
  // and offset that is suitable for ll/sc-style instructions.
  if (Constraint == "ZC")
    return C_Memory;

  // Anything else is handled by the generic implementation.
  return TargetLowering::getConstraintType(Constraint);
}
4035
4036/// Examine constraint type and operand type and determine a weight value.
4037/// This object must already have been set up with the operand type
4038/// and the current alternative constraint selected.
MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    break;
  case 'd':
  case 'y':
    // General-purpose register constraints: only integers qualify.
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f': // FPU or MSA register
    if (Subtarget.hasMSA() && type->isVectorTy() &&
        type->getPrimitiveSizeInBits().getFixedValue() == 128)
      weight = CW_Register;
    else if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'c': // $25 for indirect jumps
  case 'l': // lo register
  case 'x': // hilo register pair
    // These name specific registers, so give the stronger weight.
    if (type->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'I': // signed 16 bit immediate
  case 'J': // integer zero
  case 'K': // unsigned 16 bit immediate
  case 'L': // signed 32 bit immediate where lower 16 bits are 0
  case 'N': // immediate in the range of -65535 to -1 (inclusive)
  case 'O': // signed 15 bit immediate (+- 16383)
  case 'P': // immediate in the range of 65535 to 1 (inclusive)
    if (isa<ConstantInt>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'R':
    weight = CW_Memory;
    break;
  }
  return weight;
}
4088
4089/// This is a helper function to parse a physical register string and split it
4090/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4091/// that is returned indicates whether parsing was successful. The second flag
4092/// is true if the numeric part exists.
4093static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4094 unsigned long long &Reg) {
4095 if (C.front() != '{' || C.back() != '}')
4096 return std::make_pair(false, false);
4097
4098 // Search for the first numeric character.
4099 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4100 I = std::find_if(B, E, isdigit);
4101
4102 Prefix = StringRef(B, I - B);
4103
4104 // The second flag is set to false if no numeric characters were found.
4105 if (I == E)
4106 return std::make_pair(true, false);
4107
4108 // Parse the numeric characters.
4109 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
4110 true);
4111}
4112
                                            ISD::NodeType) const {
  // On the non-O32 (64-bit register) ABIs, a 32-bit return value occupies a
  // 64-bit register, so promote any type smaller than that minimum width.
  bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
  EVT MinVT = getRegisterType(Cond ? MVT::i64 : MVT::i32);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}
4119
/// Resolve an explicit-register inline-asm constraint of the form "{name}"
/// (e.g. "{$f2}", "{hi}", "{$msacsr}") to a physical register and its class.
/// Returns (0, nullptr) when the string does not name a valid register.
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
  const TargetRegisterInfo *TRI =
  const TargetRegisterClass *RC;
  StringRef Prefix;
  unsigned long long Reg;

  std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);

  if (!R.first)
    return std::make_pair(0U, nullptr);

  if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
    // No numeric characters follow "hi" or "lo".
    if (R.second)
      return std::make_pair(0U, nullptr);

    RC = TRI->getRegClass(Prefix == "hi" ?
                          Mips::HI32RegClassID : Mips::LO32RegClassID);
    return std::make_pair(*(RC->begin()), RC);
  } else if (Prefix.starts_with("$msa")) {
    // Parse $msa(ir|csr|access|save|modify|request|map|unmap)

    // No numeric characters follow the name.
    if (R.second)
      return std::make_pair(0U, nullptr);

        .Case("$msair", Mips::MSAIR)
        .Case("$msacsr", Mips::MSACSR)
        .Case("$msaaccess", Mips::MSAAccess)
        .Case("$msasave", Mips::MSASave)
        .Case("$msamodify", Mips::MSAModify)
        .Case("$msarequest", Mips::MSARequest)
        .Case("$msamap", Mips::MSAMap)
        .Case("$msaunmap", Mips::MSAUnmap)
        .Default(0);

    if (!Reg)
      return std::make_pair(0U, nullptr);

    RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
    return std::make_pair(Reg, RC);
  }

  // All remaining prefixes require a register number.
  if (!R.second)
    return std::make_pair(0U, nullptr);

  if (Prefix == "$f") { // Parse $f0-$f31.
    // If the size of FP registers is 64-bit or Reg is an even number, select
    // the 64-bit register class. Otherwise, select the 32-bit register class.
    if (VT == MVT::Other)
      VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;

    RC = getRegClassFor(VT);

    if (RC == &Mips::AFGR64RegClass) {
      // AFGR64 registers are even/odd pairs, so the index is half the
      // FP register number.
      assert(Reg % 2 == 0);
      Reg >>= 1;
    }
  } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
    RC = TRI->getRegClass(Mips::FCCRegClassID);
  else if (Prefix == "$w") { // Parse $w0-$w31.
    RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
  } else { // Parse $0-$31.
    assert(Prefix == "$");
    RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
  }

  assert(Reg < RC->getNumRegs());
  return std::make_pair(*(RC->begin() + Reg), RC);
}
4193
4194/// Given a register class constraint, like 'r', if this corresponds directly
4195/// to an LLVM register class, return a register of 0 and the register class
4196/// pointer.
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
std::pair<unsigned, const TargetRegisterClass *>
MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                 StringRef Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
    case 'y': // Same as 'r'. Exists for compatibility.
    case 'r':
      // Sub-word and word-sized values (and f32 under soft-float) live in
      // 32-bit GPRs; MIPS16 restricts them to its smaller register set.
      if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
           VT == MVT::i1) ||
          (VT == MVT::f32 && Subtarget.useSoftFloat())) {
        if (Subtarget.inMips16Mode())
          return std::make_pair(0U, &Mips::CPU16RegsRegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
      }
      if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
        return std::make_pair(0U, &Mips::GPR32RegClass);
      if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
        return std::make_pair(0U, &Mips::GPR64RegClass);
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    case 'f': // FPU or MSA register
      if (VT == MVT::v16i8)
        return std::make_pair(0U, &Mips::MSA128BRegClass);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return std::make_pair(0U, &Mips::MSA128HRegClass);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return std::make_pair(0U, &Mips::MSA128WRegClass);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return std::make_pair(0U, &Mips::MSA128DRegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &Mips::FGR32RegClass);
      else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
        if (Subtarget.isFP64bit())
          return std::make_pair(0U, &Mips::FGR64RegClass);
        return std::make_pair(0U, &Mips::AFGR64RegClass);
      }
      break;
    case 'c': // register suitable for indirect jump
      if (VT == MVT::i32)
        return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
      if (VT == MVT::i64)
        return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    case 'l': // use the `lo` register to store values
              // that are no bigger than a word
      if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
        return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
      return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
    case 'x': // use the concatenated `hi` and `lo` registers
              // to store doubleword values
      // Fixme: Not triggering the use of both hi and low
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    }
  }

  // Fall back to explicit "{reg}" parsing before deferring to the
  // target-independent handling.
  if (!Constraint.empty()) {
    std::pair<unsigned, const TargetRegisterClass *> R;
    R = parseRegForInlineAsmConstraint(Constraint, VT);

    if (R.second)
      return R;
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
4268
4269/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4270/// vector. If it is invalid, don't add anything to Ops.
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      StringRef Constraint,
                                                      std::vector<SDValue> &Ops,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only support length 1 constraints for now.
  if (Constraint.size() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break; // This will fall through to the generic implementation.
  case 'I': // Signed 16 bit constant
    // If this fails, the parent routine will give an error.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if (isInt<16>(Val)) {
        break;
      }
    }
    return;
  case 'J': // integer zero
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getZExtValue();
      if (Val == 0) {
        Result = DAG.getTargetConstant(0, DL, Type);
        break;
      }
    }
    return;
  case 'K': // unsigned 16 bit immediate
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      uint64_t Val = (uint64_t)C->getZExtValue();
      if (isUInt<16>(Val)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'L': // signed 32 bit immediate where lower 16 bits are 0
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
        break;
      }
    }
    return;
  case 'N': // immediate in the range of -65535 to -1 (inclusive)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((Val >= -65535) && (Val <= -1)) {
        break;
      }
    }
    return;
  case 'O': // signed 15 bit immediate
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((isInt<15>(Val))) {
        break;
      }
    }
    return;
  case 'P': // immediate in the range of 1 to 65535 (inclusive)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((Val <= 65535) && (Val >= 1)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  }

  // A successful match fell out of the switch with Result set.
  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Defer anything we did not handle to the generic implementation.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
4365
4366bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4367 const AddrMode &AM, Type *Ty,
4368 unsigned AS,
4369 Instruction *I) const {
4370 // No global is ever allowed as a base.
4371 if (AM.BaseGV)
4372 return false;
4373
4374 switch (AM.Scale) {
4375 case 0: // "r+i" or just "i", depending on HasBaseReg.
4376 break;
4377 case 1:
4378 if (!AM.HasBaseReg) // allow "r+i".
4379 break;
4380 return false; // disallow "r+r" or "r+r+i".
4381 default:
4382 return false;
4383 }
4384
4385 return true;
4386}
4387
bool
MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Mips target isn't yet aware of offsets, so never fold a constant
  // offset into a global address node.
  return false;
}
4393
4394EVT MipsTargetLowering::getOptimalMemOpType(
4395 const MemOp &Op, const AttributeList &FuncAttributes) const {
4396 if (Subtarget.hasMips64())
4397 return MVT::i64;
4398
4399 return MVT::i32;
4400}
4401
4402bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4403 bool ForCodeSize) const {
4404 if (VT != MVT::f32 && VT != MVT::f64)
4405 return false;
4406 if (Imm.isNegZero())
4407 return false;
4408 return Imm.isZero();
4409}
4410
/// Select the jump-table entry encoding; N64 PIC needs a special entry kind.
unsigned MipsTargetLowering::getJumpTableEncoding() const {

  // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
  if (ABI.IsN64() && isPositionIndependent())

}
4419
bool MipsTargetLowering::useSoftFloat() const {
  // Delegate directly to the subtarget's soft-float setting.
  return Subtarget.useSoftFloat();
}
4423
/// Materialize an incoming byval argument in the callee: create a fixed
/// frame object covering it and spill any portion that arrived in argument
/// registers into that object, so the callee sees one contiguous copy.
void MipsTargetLowering::copyByValRegs(
    SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
    SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
    SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
    MipsCCState &State) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
  unsigned NumRegs = LastReg - FirstReg;
  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
  // The frame object must hold both the in-register part and the in-memory
  // part of the argument.
  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
  int FrameObjOffset;
  ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();

  if (RegAreaSize)
    FrameObjOffset =
        (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
  else
    FrameObjOffset = VA.getLocMemOffset();

  // Create frame object.
  EVT PtrTy = getPointerTy(DAG.getDataLayout());
  // Make the fixed object stored to mutable so that the load instructions
  // referencing it have their memory dependencies added.
  // Set the frame object as isAliased which clears the underlying objects
  // vector in ScheduleDAGInstrs::buildSchedGraph() resulting in addition of all
  // stores as dependencies for loads referencing this fixed object.
  int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
  // The argument's value, as seen by the callee, is the address of the copy.
  InVals.push_back(FIN);

  if (!NumRegs)
    return;

  // Copy arg registers.
  MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);

  for (unsigned I = 0; I < NumRegs; ++I) {
    unsigned ArgReg = ByValArgRegs[FirstReg + I];
    unsigned VReg = addLiveIn(MF, ArgReg, RC);
    unsigned Offset = I * GPRSizeInBytes;
    SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
                                   DAG.getConstant(Offset, DL, PtrTy));
    SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
                                 StorePtr, MachinePointerInfo(FuncArg, Offset));
    OutChains.push_back(Store);
  }
}
4475
4476// Copy byVal arg to registers and stack.
// Copy byVal arg to registers and stack.
void MipsTargetLowering::passByValArg(
    SDValue Chain, const SDLoc &DL,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
    MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
    unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
    const CCValAssign &VA) const {
  unsigned ByValSizeInBytes = Flags.getByValSize();
  unsigned OffsetInBytes = 0; // From beginning of struct
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  // Loads may not be more aligned than the struct itself.
  Align Alignment =
      std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
  EVT PtrTy = getPointerTy(DAG.getDataLayout()),
      RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  unsigned NumRegs = LastReg - FirstReg;

  if (NumRegs) {
    // True when the final register holds fewer than RegSizeInBytes bytes of
    // the struct.
    bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
    unsigned I = 0;

    // Copy words to registers.
    for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
      SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                                    DAG.getConstant(OffsetInBytes, DL, PtrTy));
      SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
                                    MachinePointerInfo(), Alignment);
      MemOpChains.push_back(LoadVal.getValue(1));
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
    }

    // Return if the struct has been fully copied.
    if (ByValSizeInBytes == OffsetInBytes)
      return;

    // Copy the remainder of the byval argument with sub-word loads and shifts.
    if (LeftoverBytes) {
      SDValue Val;

      // Load progressively smaller pieces (RegSize/2, /4, ...) until the
      // struct is exhausted, OR-ing each shifted piece into Val.
      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;

        if (RemainingSizeInBytes < LoadSizeInBytes)
          continue;

        // Load subword.
        SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                                      DAG.getConstant(OffsetInBytes, DL,
                                                      PtrTy));
        SDValue LoadVal = DAG.getExtLoad(
            ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
            MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
        MemOpChains.push_back(LoadVal.getValue(1));

        // Shift the loaded value.
        unsigned Shamt;

        if (isLittle)
          Shamt = TotalBytesLoaded * 8;
        else
          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;

        SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
                                    DAG.getConstant(Shamt, DL, MVT::i32));

        if (Val.getNode())
          Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
        else
          Val = Shift;

        OffsetInBytes += LoadSizeInBytes;
        TotalBytesLoaded += LoadSizeInBytes;
        Alignment = std::min(Alignment, Align(LoadSizeInBytes));
      }

      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, Val));
      return;
    }
  }

  // Copy remainder of byval arg to it with memcpy.
  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
  SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                            DAG.getConstant(OffsetInBytes, DL, PtrTy));
  SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
  Chain = DAG.getMemcpy(
      Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
      Align(Alignment), /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
  MemOpChains.push_back(Chain);
}
4572
/// Spill the unused fixed-argument registers of a vararg function into the
/// register save area and record the frame index used by VASTART.
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
                                         SDValue Chain, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         CCState &State) const {
  unsigned Idx = State.getFirstUnallocated(ArgRegs);
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Offset of the first variable argument from stack pointer.
  int VaArgOffset;

  if (ArgRegs.size() == Idx)
    // Every argument register was consumed by fixed arguments, so varargs
    // start right after the fixed stack arguments.
    VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
  else {
    VaArgOffset =
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
  }

  // Record the frame index of the first variable argument
  // which is a value necessary to VASTART.
  int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
  MipsFI->setVarArgsFrameIndex(FI);

  // Copy the integer registers that have not been used for argument passing
  // to the argument register save area. For O32, the save area is allocated
  // in the caller's stack frame, while for N32/64, it is allocated in the
  // callee's stack frame.
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
    unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
    SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
    FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
    SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue Store =
        DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
    // Drop the IR Value so these stores alias conservatively with vararg
    // loads.
    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
        (Value *)nullptr);
    OutChains.push_back(Store);
  }
}
4619
                                     Align Alignment) const {

  assert(Size && "Byval argument's size shouldn't be 0.");

  // A byval argument can never be more aligned than the stack itself.
  Alignment = std::min(Alignment, TFL->getStackAlign());

  unsigned FirstReg = 0;
  unsigned NumRegs = 0;

  // fastcc passes byval arguments entirely on the stack; for other
  // conventions, allocate as many argument registers as the value covers.
  if (State->getCallingConv() != CallingConv::Fast) {
    unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
    // FIXME: The O32 case actually describes no shadow registers.
    const MCPhysReg *ShadowRegs =
        ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;

    // We used to check the size as well but we can't do that anymore since
    // CCState::HandleByVal() rounds up the size after calling this function.
    assert(
        Alignment >= Align(RegSizeInBytes) &&
        "Byval argument's alignment should be a multiple of RegSizeInBytes.");

    FirstReg = State->getFirstUnallocated(IntArgRegs);

    // If Alignment > RegSizeInBytes, the first arg register must be even.
    // FIXME: This condition happens to do the right thing but it's not the
    // right way to test it. We want to check that the stack frame offset
    // of the register is aligned.
    if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
      ++FirstReg;
    }

    // Mark the registers allocated.
    Size = alignTo(Size, RegSizeInBytes);
    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
         Size -= RegSizeInBytes, ++I, ++NumRegs)
      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
  }

  // Record the register interval so the lowering code can copy/spill it.
  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}
4664
/// Expand a SELECT pseudo into an explicit branch diamond for subtargets
/// that lack conditional-move instructions. Opc is the branch opcode used
/// on the condition in operand 1; isFPCmp selects the bc1[tf] form.
MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
                                                        bool isFPCmp,
                                                        unsigned Opc) const {
      "Subtarget already supports SELECT nodes with the use of"
      "conditional-move instructions.");

  const TargetInstrInfo *TII =
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  // thisMBB:
  // ...
  // TrueVal = ...
  // setcc r1, r2, r3
  // bNE r1, r0, copy1MBB
  // fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  if (isFPCmp) {
    // bc1[tf] cc, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
  } else {
    // bne rs, $0, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addReg(Mips::ZERO)
        .addMBB(sinkMBB);
  }

  // copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //  ...
  BB = sinkMBB;

  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
4742
// Expand a D_SELECT pseudo — two back-to-back SELECTs sharing one
// condition — into a single branch diamond with two PHIs, avoiding the
// extra branches two separate SELECT expansions would create.
// MI operands: (0)(1) the two result vregs, (2) shared condition register,
// (3)(4) true values, (5)(6) false values.
// NOTE(review): this rendered listing drops some original source lines
// (e.g. the head of the assert and the TII initializer); remaining tokens
// kept verbatim.
4744MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
 4745 MachineBasicBlock *BB) const {
 // Tail of an assert documenting that this expansion only runs on
 // subtargets lacking conditional-move support.
 4747 "Subtarget already supports SELECT nodes with the use of"
 4748 "conditional-move instructions.");
 4749
 4751 DebugLoc DL = MI.getDebugLoc();
 4752
 4753 // D_SELECT substitutes two SELECT nodes that goes one after another and
 4754 // have the same condition operand. On machines which don't have
 4755 // conditional-move instruction, it reduces unnecessary branch instructions
 4756 // which are result of using two diamond patterns that are result of two
 4757 // SELECT pseudo instructions.
 4758 const BasicBlock *LLVM_BB = BB->getBasicBlock();
 4760
 4761 // thisMBB:
 4762 // ...
 4763 // TrueVal = ...
 4764 // setcc r1, r2, r3
 4765 // bNE r1, r0, copy1MBB
 4766 // fallthrough --> copy0MBB
 4767 MachineBasicBlock *thisMBB = BB;
 4768 MachineFunction *F = BB->getParent();
 4769 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
 4770 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
 4771 F->insert(It, copy0MBB);
 4772 F->insert(It, sinkMBB);
 4773
 4774 // Transfer the remainder of BB and its successor edges to sinkMBB.
 4775 sinkMBB->splice(sinkMBB->begin(), BB,
 4776 std::next(MachineBasicBlock::iterator(MI)), BB->end());
 4778
 4779 // Next, add the true and fallthrough blocks as its successors.
 4780 BB->addSuccessor(copy0MBB);
 4781 BB->addSuccessor(sinkMBB);
 4782
 4783 // bne rs, $0, sinkMBB
 4784 BuildMI(BB, DL, TII->get(Mips::BNE))
 4785 .addReg(MI.getOperand(2).getReg())
 4786 .addReg(Mips::ZERO)
 4787 .addMBB(sinkMBB);
 4788
 4789 // copy0MBB:
 4790 // %FalseValue = ...
 4791 // # fallthrough to sinkMBB
 4792 BB = copy0MBB;
 4793
 4794 // Update machine-CFG edges
 4795 BB->addSuccessor(sinkMBB);
 4796
 4797 // sinkMBB:
 4798 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
 4799 // ...
 4800 BB = sinkMBB;
 4801
 4802 // Use two PHI nodes to select two results
 4803 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
 4804 .addReg(MI.getOperand(3).getReg())
 4805 .addMBB(thisMBB)
 4806 .addReg(MI.getOperand(5).getReg())
 4807 .addMBB(copy0MBB)
 // Second PHI, also inserted at the top of sinkMBB.
 4808 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
 4809 .addReg(MI.getOperand(4).getReg())
 4810 .addMBB(thisMBB)
 4811 .addReg(MI.getOperand(6).getReg())
 4812 .addMBB(copy0MBB);
 4813
 4814 MI.eraseFromParent(); // The pseudo instruction is gone now.
 4815
 4816 return BB;
 4817}
4818
4819// FIXME? Maybe this could be a TableGen attribute on some registers and
4820// this table could be generated automatically from RegInfo.
// Resolve a named-register global's register name to a physical register.
// Only "$28" (gp) and "sp" are supported, in their 64- or 32-bit variants
// depending on the subtarget; any other name is a fatal error.
// NOTE(review): the extraction dropped the signature head and the
// `StringSwitch<Register>(Name)` initializer lines feeding the .Case
// chains below; remaining tokens kept verbatim.
 4823 const MachineFunction &MF) const {
 4824 // The Linux kernel uses $28 and sp.
 4825 if (Subtarget.isGP64bit()) {
 4827 .Case("$28", Mips::GP_64)
 4828 .Case("sp", Mips::SP_64)
 4829 .Default(Register())
 4830 if (Reg)
 4831 return Reg;
 4832 } else {
 4834 .Case("$28", Mips::GP)
 4835 .Case("sp", Mips::SP)
 4836 .Default(Register())
 4837 if (Reg)
 4838 return Reg;
 4839 }
 4840 report_fatal_error("Invalid register name global variable");
4841}
4842
// Expand the LDR_W pseudo: load a possibly-unaligned 32-bit word at
// [Address + Imm] and broadcast it into MSA vector register Dest via
// FILL_W. MI operands: (0) dest vreg, (1) base address, (2) immediate
// offset.
// NOTE(review): the extraction dropped the MRI/TII initializers and the
// `if (...)` guard line before the R6 branch — presumably a check for
// hardware unaligned-access support (the two branch comments imply R6 vs
// pre-R6); remaining tokens kept verbatim.
4843MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
 4844 MachineBasicBlock *BB) const {
 4845 MachineFunction *MF = BB->getParent();
 4848 const bool IsLittle = Subtarget.isLittle();
 4849 DebugLoc DL = MI.getDebugLoc();
 4850
 4851 Register Dest = MI.getOperand(0).getReg();
 4852 Register Address = MI.getOperand(1).getReg();
 4853 unsigned Imm = MI.getOperand(2).getImm();
 4854
 4856
 4858 // Mips release 6 can load from address that is not naturally-aligned.
 4859 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4860 BuildMI(*BB, I, DL, TII->get(Mips::LW))
 4861 .addDef(Temp)
 4862 .addUse(Address)
 4863 .addImm(Imm);
 4864 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(Temp);
 4865 } else {
 4866 // Mips release 5 needs to use instructions that can load from an unaligned
 4867 // memory address.
 // LWR/LWL pair assembles the word from its two unaligned halves; the
 // IMPLICIT_DEF seeds the partial-update register for LWR.
 4868 Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4869 Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4870 Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4871 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(Undef);
 4872 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
 4873 .addDef(LoadHalf)
 4874 .addUse(Address)
 4875 .addImm(Imm + (IsLittle ? 0 : 3))
 4876 .addUse(Undef);
 4877 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
 4878 .addDef(LoadFull)
 4879 .addUse(Address)
 4880 .addImm(Imm + (IsLittle ? 3 : 0))
 4881 .addUse(LoadHalf);
 4882 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(LoadFull);
 4883 }
 4884
 4885 MI.eraseFromParent();
 4886 return BB;
 4887}
4888
// Expand the LDR_D pseudo: load a possibly-unaligned 64-bit value at
// [Address + Imm] into MSA vector register Dest. On 64-bit GPRs a single
// LD + FILL_D suffices; on 32-bit GPRs the value is assembled from two
// 32-bit halves via FILL_W + INSERT_W (element 1 = high half).
// MI operands: (0) dest vreg, (1) base address, (2) immediate offset.
// NOTE(review): the extraction dropped the MRI/TII initializers and the
// `if (...)` guard before the R6 branch; remaining tokens kept verbatim.
4889MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
 4890 MachineBasicBlock *BB) const {
 4891 MachineFunction *MF = BB->getParent();
 4894 const bool IsLittle = Subtarget.isLittle();
 4895 DebugLoc DL = MI.getDebugLoc();
 4896
 4897 Register Dest = MI.getOperand(0).getReg();
 4898 Register Address = MI.getOperand(1).getReg();
 4899 unsigned Imm = MI.getOperand(2).getImm();
 4900
 4902
 4904 // Mips release 6 can load from address that is not naturally-aligned.
 4905 if (Subtarget.isGP64bit()) {
 4906 Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
 4907 BuildMI(*BB, I, DL, TII->get(Mips::LD))
 4908 .addDef(Temp)
 4909 .addUse(Address)
 4910 .addImm(Imm);
 4911 BuildMI(*BB, I, DL, TII->get(Mips::FILL_D)).addDef(Dest).addUse(Temp);
 4912 } else {
 // 32-bit GPRs: two naturally-aligned LWs; offsets swap with endianness.
 4913 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
 4914 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4915 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4916 BuildMI(*BB, I, DL, TII->get(Mips::LW))
 4917 .addDef(Lo)
 4918 .addUse(Address)
 4919 .addImm(Imm + (IsLittle ? 0 : 4));
 4920 BuildMI(*BB, I, DL, TII->get(Mips::LW))
 4921 .addDef(Hi)
 4922 .addUse(Address)
 4923 .addImm(Imm + (IsLittle ? 4 : 0));
 4924 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(Lo);
 4925 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
 4926 .addUse(Wtemp)
 4927 .addUse(Hi)
 4928 .addImm(1);
 4929 }
 4930 } else {
 4931 // Mips release 5 needs to use instructions that can load from an unaligned
 4932 // memory address.
 // Each 32-bit half is assembled with an LWR/LWL pair (seeded by an
 // IMPLICIT_DEF), then combined with FILL_W + INSERT_W as above.
 4933 Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4934 Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4935 Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4936 Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4937 Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4938 Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4939 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
 4940 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(LoUndef);
 4941 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
 4942 .addDef(LoHalf)
 4943 .addUse(Address)
 4944 .addImm(Imm + (IsLittle ? 0 : 7))
 4945 .addUse(LoUndef);
 4946 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
 4947 .addDef(LoFull)
 4948 .addUse(Address)
 4949 .addImm(Imm + (IsLittle ? 3 : 4))
 4950 .addUse(LoHalf);
 4951 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(HiUndef);
 4952 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
 4953 .addDef(HiHalf)
 4954 .addUse(Address)
 4955 .addImm(Imm + (IsLittle ? 4 : 3))
 4956 .addUse(HiUndef);
 4957 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
 4958 .addDef(HiFull)
 4959 .addUse(Address)
 4960 .addImm(Imm + (IsLittle ? 7 : 0))
 4961 .addUse(HiHalf);
 4962 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(LoFull);
 4963 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
 4964 .addUse(Wtemp)
 4965 .addUse(HiFull)
 4966 .addImm(1);
 4967 }
 4968
 4969 MI.eraseFromParent();
 4970 return BB;
 4971}
4972
// Expand the STR_W pseudo: store the low 32-bit element of MSA vector
// register StoreVal to a possibly-unaligned [Address + Imm]. The scalar
// is extracted with COPY_S_W, then stored with SW (R6 path) or an
// SWR/SWL pair (pre-R6 path).
// MI operands: (0) source vector vreg, (1) base address, (2) immediate
// offset.
// NOTE(review): the extraction dropped the MRI/TII initializers and the
// `if (...)` guard before the R6 branch; remaining tokens kept verbatim.
4973MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
 4974 MachineBasicBlock *BB) const {
 4975 MachineFunction *MF = BB->getParent();
 4978 const bool IsLittle = Subtarget.isLittle();
 4979 DebugLoc DL = MI.getDebugLoc();
 4980
 4981 Register StoreVal = MI.getOperand(0).getReg();
 4982 Register Address = MI.getOperand(1).getReg();
 4983 unsigned Imm = MI.getOperand(2).getImm();
 4984
 4986
 4988 // Mips release 6 can store to address that is not naturally-aligned.
 4989 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
 4990 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 4991 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(BitcastW).addUse(StoreVal);
 4992 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
 4993 .addDef(Tmp)
 4994 .addUse(BitcastW)
 4995 .addImm(0);
 4996 BuildMI(*BB, I, DL, TII->get(Mips::SW))
 4997 .addUse(Tmp)
 4998 .addUse(Address)
 4999 .addImm(Imm);
 5000 } else {
 5001 // Mips release 5 needs to use instructions that can store to an unaligned
 5002 // memory address.
 5003 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 5004 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
 5005 .addDef(Tmp)
 5006 .addUse(StoreVal)
 5007 .addImm(0);
 // SWR/SWL pair writes the two unaligned halves; offsets swap with
 // endianness.
 5008 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
 5009 .addUse(Tmp)
 5010 .addUse(Address)
 5011 .addImm(Imm + (IsLittle ? 0 : 3));
 5012 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
 5013 .addUse(Tmp)
 5014 .addUse(Address)
 5015 .addImm(Imm + (IsLittle ? 3 : 0));
 5016 }
 5017
 5018 MI.eraseFromParent();
 5019
 5020 return BB;
 5021}
5022
// Expand the STR_D pseudo: store the low 64-bit element of MSA vector
// register StoreVal to a possibly-unaligned [Address + Imm]. On 64-bit
// GPRs one COPY_S_D + SD suffices (R6 path); on 32-bit GPRs the value is
// split into two 32-bit halves with COPY_S_W and stored with SW (R6) or
// SWR/SWL pairs (pre-R6).
// MI operands: (0) source vector vreg, (1) base address, (2) immediate
// offset.
// NOTE(review): the extraction dropped the MRI/TII initializers and the
// `if (...)` guard before the R6 branch; remaining tokens kept verbatim.
5023MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
 5024 MachineBasicBlock *BB) const {
 5025 MachineFunction *MF = BB->getParent();
 5028 const bool IsLittle = Subtarget.isLittle();
 5029 DebugLoc DL = MI.getDebugLoc();
 5030
 5031 Register StoreVal = MI.getOperand(0).getReg();
 5032 Register Address = MI.getOperand(1).getReg();
 5033 unsigned Imm = MI.getOperand(2).getImm();
 5034
 5036
 5038 // Mips release 6 can store to address that is not naturally-aligned.
 5039 if (Subtarget.isGP64bit()) {
 5040 Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
 5041 Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
 5042 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
 5043 .addDef(BitcastD)
 5044 .addUse(StoreVal);
 5045 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_D))
 5046 .addDef(Lo)
 5047 .addUse(BitcastD)
 5048 .addImm(0);
 5049 BuildMI(*BB, I, DL, TII->get(Mips::SD))
 5050 .addUse(Lo)
 5051 .addUse(Address)
 5052 .addImm(Imm);
 5053 } else {
 // 32-bit GPRs: extract word elements 0/1 and store with two SWs;
 // offsets swap with endianness.
 5054 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
 5055 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 5056 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 5057 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
 5058 .addDef(BitcastW)
 5059 .addUse(StoreVal);
 5060 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
 5061 .addDef(Lo)
 5062 .addUse(BitcastW)
 5063 .addImm(0);
 5064 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
 5065 .addDef(Hi)
 5066 .addUse(BitcastW)
 5067 .addImm(1);
 5068 BuildMI(*BB, I, DL, TII->get(Mips::SW))
 5069 .addUse(Lo)
 5070 .addUse(Address)
 5071 .addImm(Imm + (IsLittle ? 0 : 4));
 5072 BuildMI(*BB, I, DL, TII->get(Mips::SW))
 5073 .addUse(Hi)
 5074 .addUse(Address)
 5075 .addImm(Imm + (IsLittle ? 4 : 0));
 5076 }
 5077 } else {
 5078 // Mips release 5 needs to use instructions that can store to an unaligned
 5079 // memory address.
 // Each extracted 32-bit half is written with its own SWR/SWL pair.
 5080 Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
 5081 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 5082 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
 5083 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(Bitcast).addUse(StoreVal);
 5084 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
 5085 .addDef(Lo)
 5086 .addUse(Bitcast)
 5087 .addImm(0);
 5088 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
 5089 .addDef(Hi)
 5090 .addUse(Bitcast)
 5091 .addImm(1);
 5092 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
 5093 .addUse(Lo)
 5094 .addUse(Address)
 5095 .addImm(Imm + (IsLittle ? 0 : 3));
 5096 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
 5097 .addUse(Lo)
 5098 .addUse(Address)
 5099 .addImm(Imm + (IsLittle ? 3 : 0));
 5100 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
 5101 .addUse(Hi)
 5102 .addUse(Address)
 5103 .addImm(Imm + (IsLittle ? 4 : 7));
 5104 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
 5105 .addUse(Hi)
 5106 .addUse(Address)
 5107 .addImm(Imm + (IsLittle ? 7 : 4));
 5108 }
 5109
 5110 MI.eraseFromParent();
 5111 return BB;
 5112}
unsigned const MachineRegisterInfo * MRI
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ATTRIBUTE_UNUSED
Definition: Compiler.h:282
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(...)
Definition: Debug.h:106
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
lazy value info
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
cl::opt< bool > EmitJalrReloc
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) LLVM_ATTRIBUTE_UNUSED
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, ArrayRef< MCPhysReg > F64Regs)
static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, const MipsSubtarget &Subtarget)
static bool invertFPCondCodeUser(Mips::CondCode CC)
This function returns true if the floating point conditional branches and conditional moves which use...
static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG, bool SingleFloat)
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static const MCPhysReg Mips64DPRegs[8]
static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG, bool IsLittle)
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset)
static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
static std::pair< bool, bool > parsePhysicalReg(StringRef C, StringRef &Prefix, unsigned long long &Reg)
This is a helper function to parse a physical register string and split it into non-numeric and numer...
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Chain, SDValue Src, unsigned Offset)
static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
cl::opt< bool > EmitJalrReloc
static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op)
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static cl::opt< bool > NoZeroDivCheck("mno-check-zero-division", cl::Hidden, cl::desc("MIPS: Don't trap on integer division by zero."), cl::init(false))
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA, EVT ArgVT, const SDLoc &DL, SelectionDAG &DAG)
static Mips::CondCode condCodeToFCC(ISD::CondCode CC)
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, const SDLoc &DL)
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
This file defines the SmallVector class.
static const MCPhysReg IntRegs[32]
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static const MCPhysReg F32Regs[64]
Value * RHS
Value * LHS
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
const T * data() const
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
static BranchProbability getOne()
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
CallingConv::ID getCallingConv() const
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
bool isVarArg() const
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
bool isUpperBitsInLoc() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isMemLoc() const
int64_t getLocMemOffset() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
A debug info location.
Definition: DebugLoc.h:33
const char * getSymbol() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:688
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
const GlobalObject * getAliaseeObject() const
Definition: Globals.cpp:400
bool hasInternalLinkage() const
Definition: GlobalValue.h:526
Class to represent integer types.
Definition: DerivedTypes.h:42
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:212
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Machine Value Type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
static auto fp_fixedlen_vector_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:585
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ MOVolatile
The memory access is volatile.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getAlign() const
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
bool IsN64() const
Definition: MipsABIInfo.h:42
ArrayRef< MCPhysReg > GetVarArgRegs() const
The registers to use for the variable argument list.
Definition: MipsABIInfo.cpp:40
bool ArePtrs64bit() const
Definition: MipsABIInfo.h:73
unsigned GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const
Obtain the size of the area allocated by the callee for arguments.
Definition: MipsABIInfo.cpp:48
unsigned GetPtrAddiuOp() const
unsigned GetPtrAndOp() const
ArrayRef< MCPhysReg > GetByValArgRegs() const
The registers to use for byval arguments.
Definition: MipsABIInfo.cpp:32
unsigned GetNullPtr() const
Definition: MipsABIInfo.cpp:89
bool IsN32() const
Definition: MipsABIInfo.h:41
bool IsO32() const
Definition: MipsABIInfo.h:40
bool WasOriginalArgVectorFloat(unsigned ValNo) const
Definition: MipsCCState.h:198
static SpecialCallingConvType getSpecialCallingConvForCallee(const SDNode *Callee, const MipsSubtarget &Subtarget)
Determine the SpecialCallingConvType for the given callee.
Definition: MipsCCState.cpp:70
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
void setVarArgsFrameIndex(int Index)
unsigned getSRetReturnReg() const
MachinePointerInfo callPtrInfo(MachineFunction &MF, const char *ES)
Create a MachinePointerInfo that has an ExternalSymbolPseudoSourceValue object representing a GOT ent...
Register getGlobalBaseReg(MachineFunction &MF)
void setSRetReturnReg(unsigned Reg)
void setFormalArgInfo(unsigned Size, bool HasByval)
static const uint32_t * getMips16RetHelperMask()
bool hasMips32r6() const
bool hasMips4() const
bool hasMips64r2() const
bool isFP64bit() const
bool isLittle() const
bool inMicroMipsMode() const
bool useSoftFloat() const
const MipsInstrInfo * getInstrInfo() const override
bool hasMips64r6() const
bool inMips16Mode() const
bool hasMips64() const
bool hasMips32() const
bool hasSym32() const
bool useXGOT() const
bool inAbs2008Mode() const
const MipsRegisterInfo * getRegisterInfo() const override
bool isABICalls() const
bool hasCnMips() const
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
bool isGP64bit() const
bool hasExtractInsert() const
Features related to the presence of specific instructions.
bool hasMips32r2() const
bool hasMSA() const
bool isSingleFloat() const
bool isABI_O32() const
bool useLongCalls() const
unsigned getGPRSizeInBytes() const
bool inMips16HardFloat() const
const TargetFrameLowering * getFrameLowering() const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static const MipsTargetLowering * create(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN64) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Break down vectors to the correct number of gpr sized integers.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName - This method returns the name of a target specific
SDValue getAddrNonPICSym64(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - get the ISD::SETCC result ValueType
SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MipsTargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
const MipsABIInfo & ABI
SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned HiFlag, unsigned LoFlag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
CCAssignFn * CCAssignFnForReturn() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built out of custom code.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
CCAssignFn * CCAssignFnForCall() const
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual void getOpndList(SmallVectorImpl< SDValue > &Ops, std::deque< std::pair< unsigned, SDValue > > &RegsToPass, bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const
This function fills Ops, which is the list of operands that will later be used when a function call n...
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN32OrN64) const
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const
const MipsSubtarget & Subtarget
void HandleByVal(CCState *, unsigned &, Align) const override
Target-specific cleanup for formal ByVal parameters.
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
bool IsConstantInSmallSection(const DataLayout &DL, const Constant *CN, const TargetMachine &TM) const
Return true if this constant should be placed into small data section.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
Definition: SelectionDAG.h:228
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:748
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:799
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SDValue.
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:758
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:825
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:495
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:710
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:496
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:698
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:794
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:490
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)
Set CallSiteInfo to be associated with Node.
LLVMContext * getContext() const
Definition: SelectionDAG.h:508
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:765
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:578
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual TargetLoweringObjectFile * getObjFileLowering() const
TargetOptions Options
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:153
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
@ Entry
Definition: COFF.h:844
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1197
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1193
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:744
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1226
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1312
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1102
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:498
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1304
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:964
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:954
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:236
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition: ISDOpcodes.h:141
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ TargetJumpTable
Definition: ISDOpcodes.h:173
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1059
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1148
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition: ISDOpcodes.h:515
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition: ISDOpcodes.h:522
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:757
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1308
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1222
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
Definition: ISDOpcodes.h:1044
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:772
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:1031
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1112
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:849
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
Definition: ISDOpcodes.h:135
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:887
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1279
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1217
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1141
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:794
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:508
@ AssertZext
Definition: ISDOpcodes.h:62
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
Definition: ISDOpcodes.h:1211
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1613
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1593
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ MO_GOT_CALL
MO_GOT_CALL - Represents the offset into the global offset table at which the address of a call site ...
Definition: MipsBaseInfo.h:44
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
@ MO_GOT
MO_GOT - Represents the offset into the global offset table at which the address the relocation entry...
Definition: MipsBaseInfo.h:38
@ MO_JALR
Helper operand used to generate R_MIPS_JALR.
Definition: MipsBaseInfo.h:95
@ MO_GOTTPREL
MO_GOTTPREL - Represents the offset from the thread pointer (Initial Exec TLS).
Definition: MipsBaseInfo.h:69
@ MO_GOT_HI16
MO_GOT_HI16/LO16, MO_CALL_HI16/LO16 - Relocations used for large GOTs.
Definition: MipsBaseInfo.h:89
@ MO_TLSLDM
MO_TLSLDM - Represents the offset into the global offset table at which.
Definition: MipsBaseInfo.h:63
@ MO_TLSGD
MO_TLSGD - Represents the offset into the global offset table at which.
Definition: MipsBaseInfo.h:58
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Dead
Unused definition.
@ Define
Register definition.
@ Kill
The last use of a register.
@ EarlyClobber
Register definition happens before uses.
Not(const Pred &P) -> Not< Pred >
@ GeneralDynamic
Definition: CodeGen.h:46
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:285
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:403
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CombineLevel
Definition: DAGCombine.h:15
const MipsTargetLowering * createMips16TargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
Create MipsTargetLowering objects.
@ Or
Bitwise or logical OR of integers.
@ Add
Sum of integers.
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
const MipsTargetLowering * createMipsSETargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
Definition: StringRef.cpp:488
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Extended Value Type.
Definition: ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:94
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:295
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
Definition: ValueTypes.h:465
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
Definition: ValueTypes.h:59
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:210
bool isRound() const
Return true if the size is a power-of-two number of bytes.
Definition: ValueTypes.h:243
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:323
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:331
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
Align getNonZeroOrigAlign() const
SmallVector< ArgRegPair, 1 > ArgRegPairs
Vector of call argument and its forwarding register.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals