//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "MipsISelLowering.h"
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <deque>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mips-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));

static const MCPhysReg Mips64DPRegs[8] = {
    Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
    Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};

// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                      CallingConv::ID CC,
                                                      EVT VT) const {
  if (!VT.isVector())
    return getRegisterType(Context, VT);

  if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
    return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
                                                             : MVT::i64;
  return getRegisterType(Context, VT.getVectorElementType());
}

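// For example, a 128-bit MSA vector such as v4i32 is split into
// divideCeil(128, 32) == 4 i32 pieces under O32 and into
// divideCeil(128, 64) == 2 i64 pieces under N32/N64.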
unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                           CallingConv::ID CC,
                                                           EVT VT) const {
  if (VT.isVector()) {
    if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
      return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
    return VT.getVectorNumElements() *
           getNumRegisters(Context, VT.getVectorElementType());
  }
  return MipsTargetLowering::getNumRegisters(Context, VT);
}

unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (VT.isPow2VectorType()) {
    IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
    RegisterVT = IntermediateVT.getSimpleVT();
    NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
    return NumIntermediates;
  }
  IntermediateVT = VT.getVectorElementType();
  NumIntermediates = VT.getVectorNumElements();
  RegisterVT = getRegisterType(Context, IntermediateVT);
  return NumIntermediates * getNumRegisters(Context, IntermediateVT);
}

SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
}

SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER: break;
  case MipsISD::JmpLink: return "MipsISD::JmpLink";
  case MipsISD::TailCall: return "MipsISD::TailCall";
  case MipsISD::Highest: return "MipsISD::Highest";
  case MipsISD::Higher: return "MipsISD::Higher";
  case MipsISD::Hi: return "MipsISD::Hi";
  case MipsISD::Lo: return "MipsISD::Lo";
  case MipsISD::GotHi: return "MipsISD::GotHi";
  case MipsISD::TlsHi: return "MipsISD::TlsHi";
  case MipsISD::GPRel: return "MipsISD::GPRel";
  case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
  case MipsISD::Ret: return "MipsISD::Ret";
  case MipsISD::ERet: return "MipsISD::ERet";
  case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
  case MipsISD::FAbs: return "MipsISD::FAbs";
  case MipsISD::FMS: return "MipsISD::FMS";
  case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
  case MipsISD::FPCmp: return "MipsISD::FPCmp";
  case MipsISD::FSELECT: return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
  case MipsISD::MFHI: return "MipsISD::MFHI";
  case MipsISD::MFLO: return "MipsISD::MFLO";
  case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
  case MipsISD::Mult: return "MipsISD::Mult";
  case MipsISD::Multu: return "MipsISD::Multu";
  case MipsISD::MAdd: return "MipsISD::MAdd";
  case MipsISD::MAddu: return "MipsISD::MAddu";
  case MipsISD::MSub: return "MipsISD::MSub";
  case MipsISD::MSubu: return "MipsISD::MSubu";
  case MipsISD::DivRem: return "MipsISD::DivRem";
  case MipsISD::DivRemU: return "MipsISD::DivRemU";
  case MipsISD::DivRem16: return "MipsISD::DivRem16";
  case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper: return "MipsISD::Wrapper";
  case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
  case MipsISD::Sync: return "MipsISD::Sync";
  case MipsISD::Ext: return "MipsISD::Ext";
  case MipsISD::Ins: return "MipsISD::Ins";
  case MipsISD::CIns: return "MipsISD::CIns";
  case MipsISD::LWL: return "MipsISD::LWL";
  case MipsISD::LWR: return "MipsISD::LWR";
  case MipsISD::SWL: return "MipsISD::SWL";
  case MipsISD::SWR: return "MipsISD::SWR";
  case MipsISD::LDL: return "MipsISD::LDL";
  case MipsISD::LDR: return "MipsISD::LDR";
  case MipsISD::SDL: return "MipsISD::SDL";
  case MipsISD::SDR: return "MipsISD::SDR";
  case MipsISD::EXTP: return "MipsISD::EXTP";
  case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO: return "MipsISD::SHILO";
  case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DOUBLE_SELECT_I: return "MipsISD::DOUBLE_SELECT_I";
  case MipsISD::DOUBLE_SELECT_I64: return "MipsISD::DOUBLE_SELECT_I64";
  case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT: return "MipsISD::MULT";
  case MipsISD::MULTU: return "MipsISD::MULTU";
  case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ: return "MipsISD::VCEQ";
  case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR: return "MipsISD::VNOR";
  case MipsISD::VSHF: return "MipsISD::VSHF";
  case MipsISD::SHF: return "MipsISD::SHF";
  case MipsISD::ILVEV: return "MipsISD::ILVEV";
  case MipsISD::ILVOD: return "MipsISD::ILVOD";
  case MipsISD::ILVL: return "MipsISD::ILVL";
  case MipsISD::ILVR: return "MipsISD::ILVR";
  case MipsISD::PCKEV: return "MipsISD::PCKEV";
  case MipsISD::PCKOD: return "MipsISD::PCKOD";
  case MipsISD::INSVE: return "MipsISD::INSVE";
  }
  return nullptr;
}

MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
                                       const MipsSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
  // Mips does not have i1 type, so use i32 for
  // setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
  // does. Integer booleans still use 0 and 1.
  if (Subtarget.hasMips32r6())
    setBooleanContents(ZeroOrOneBooleanContent,
                       ZeroOrNegativeOneBooleanContent);

  // Load extended operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }

  // MIPS doesn't have extending float->double load/store. Set LoadExtAction
  // for f32, f16
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // Set LoadExtAction for f16 vectors to Expand
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
    MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
    if (F16VT.isValid())
      setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
  }

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc comes with an AND/OR with the result,
  // which we don't want, since the fpcmp result goes to a flag register,
  // which is used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  // Mips Custom Operations
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::FABS, MVT::f32, Custom);
  setOperationAction(ISD::FABS, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // Lower fmin and fmax operations for MIPS R6.
  // Instructions are defined but never used.
  if (Subtarget.hasMips32r6()) {
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
  }

  if (Subtarget.isGP64bit()) {
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    if (Subtarget.hasMips64r6()) {
      setOperationAction(ISD::LOAD, MVT::i64, Legal);
      setOperationAction(ISD::STORE, MVT::i64, Legal);
    } else {
      setOperationAction(ISD::LOAD, MVT::i64, Custom);
      setOperationAction(ISD::STORE, MVT::i64, Custom);
    }
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  if (Subtarget.isGP64bit())
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // Operations not directly supported by Mips.
  if (Subtarget.hasCnMips()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  // Lower f16 conversion operations into library calls
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Use the default for now
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (!Subtarget.hasMips32r2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  // MIPS16 lacks MIPS32's clz and clo instructions.
  if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (!Subtarget.hasMips64())
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  if (Subtarget.hasMips64r6()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Legal);
    setTruncStoreAction(MVT::i64, MVT::i32, Legal);
  } else if (Subtarget.isGP64bit()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
    setTruncStoreAction(MVT::i64, MVT::i32, Custom);
  }

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
                       ISD::OR, ISD::ADD, ISD::SUB, ISD::SHL});

  if (ABI.IsO32()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
    setLibcallName(RTLIB::MULO_I128, nullptr);
  }

  if (Subtarget.isGP64bit())
    setMaxAtomicSizeInBitsSupported(64);
  else
    setMaxAtomicSizeInBitsSupported(32);

  // The arguments on the stack are defined in terms of 4-byte slots on O32
  // and 8-byte slots on N32/N64.
  setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
                                                            : Align(4));

  setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);

  MaxStoresPerMemcpy = 16;

  isMicroMips = Subtarget.inMicroMipsMode();
}

const MipsTargetLowering *
MipsTargetLowering::create(const MipsTargetMachine &TM,
                           const MipsSubtarget &STI) {
  if (STI.inMips16Mode())
    return createMips16TargetLowering(TM, STI);

  return createMipsSETargetLowering(TM, STI);
}

// Create a fast isel object.
FastISel *
MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                   const TargetLibraryInfo *libInfo) const {
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());

  // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
  bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
                     !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
                     !Subtarget.inMicroMipsMode();

  // Disable if either of the following is true:
  // We do not generate PIC, the ABI is not O32, XGOT is being used.
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;

  return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
}

EVT MipsTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

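// Combine an (s|u)divrem node into a target DivRem16/DivRemU16 glue node whose
// quotient and remainder uses are read back with glued CopyFromReg nodes from
// LO and HI, i.e. div $a, $b; mflo $quot; mfhi $rem.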
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
                                                  MipsISD::DivRemU16;
  SDLoc DL(N);

  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

  // insert MFLO
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
                                            InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);
  }

  // insert MFHI
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
                                            HI, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
  }

  return SDValue();
}

static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETLT:
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETGT:
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETLE:
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETGE:
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO:  return Mips::FCOND_UN;
  case ISD::SETO:   return Mips::FCOND_OR;
  case ISD::SETNE:
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
  }
}

/// This function returns true if the floating point conditional branches and
/// conditional moves which use condition code CC should be inverted.
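/// For example, FCOND_UGE has no direct c.cond.fmt encoding; it is handled by
/// emitting the inverse compare FCOND_OLT and flipping the sense of the user
/// (bc1f/movf instead of bc1t/movt).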
static bool invertFPCondCodeUser(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return false;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");

  return true;
}

// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // must be a SETCC node
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  SDLoc DL(Op);

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
                     DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}

// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, const SDLoc &DL) {
  ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
  bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, FCC0, False, Cond);
}

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

  // If the RHS (False) is 0, we swap the order of the operands
  // of ISD::SELECT (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  if (!FalseC)
    return SDValue();

  const SDLoc DL(N);

  if (!FalseC->getZExtValue()) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SDValue True = N->getOperand(1);

    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));

    return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
  }

  // If both operands are integer constants there's a possibility that we
  // can do some interesting optimizations.
  SDValue True = N->getOperand(1);
  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

  if (!TrueC || !True.getValueType().isInteger())
    return SDValue();

  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions as the result
  // of a SETCC operator is always MVT::i32 for non-vector types.
  if (True.getValueType() == MVT::i64)
    return SDValue();

  int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();

  // 1) (a < x) ? y : y-1
  //      slti $reg1, a, x
  //      addiu $reg2, $reg1, y-1
  if (Diff == 1)
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);

  // 2) (a < x) ? y-1 : y
  //      slti $reg1, a, x
  //      xor $reg1, $reg1, 1
  //      addiu $reg2, $reg1, y-1
  if (Diff == -1) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
  }

  // Could not optimize.
  return SDValue();
}

static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
  if (!FalseC || FalseC->getZExtValue())
    return SDValue();

  // Since RHS (False) is 0, we swap the order of the True/False operands
  // (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
                                                         MipsISD::CMovFP_T;

  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue Mask = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;
  unsigned Opc;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
    // Pattern match EXT.
    //   $dst = and ((sra or srl) $src , pos), (2**size - 1)
    //   => ext $dst, $src, pos, size

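    // For example, (and (srl $src, 3), 31) has SMPos == 0, SMSize == 5 and
    // Pos == 3, and becomes (MipsISD::Ext $src, 3, 5), i.e.
    //   ext $dst, $src, 3, 5
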
    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    // Return if the shifted mask does not start at bit 0 or the sum of its
    // size and Pos exceeds the word's size.
    if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand.getOperand(0);
  } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
    // Pattern match CINS.
    //   $dst = and (shl $src , pos), mask
    //   => cins $dst, $src, pos, size
    // mask is a shifted mask with consecutive 1's, pos = shift amount,
    // size = population count.

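    // For example, (and (shl $src, 8), 0xff00) has Pos == SMPos == 8 and
    // SMSize == 8, and becomes (MipsISD::CIns $src, 8, 7) -- the size
    // operand carries size - 1, i.e. cins $dst, $src, 8, 7.
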
    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
        Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    NewOperand = FirstOperand.getOperand(0);
    // SMSize is 'location' (position) in this case, not size.
    SMSize--;
    Opc = MipsISD::CIns;
  } else {
    // Pattern match EXT.
    //   $dst = and $src, (2**size - 1) , if size > 16
    //   => ext $dst, $src, pos, size , pos = 0

    // If the mask is <= 0xffff, andi can be used instead.
    if (CN->getZExtValue() <= 0xffff)
      return SDValue();

    // Return if the mask doesn't start at position 0.
    if (SMPos)
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand;
  }
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget &Subtarget) {
  // Pattern match INS.
  //   $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //   where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //   => ins $dst, $src, size, pos, $src1
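  //
  // For example, with pos = 8 and size = 8 on i32 (mask1 = 0xff00,
  // mask0 = 0xffff00ff):
  //   $dst = or (and $src1, 0xffff00ff), (and (shl $src, 8), 0xff00)
  //   => ins $dst, $src, 8, 8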
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN, *CN1;

  // See if Op's first operand matches (and $src1 , mask0).
  if (And0.getOpcode() != ISD::AND)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() == ISD::AND &&
      And1.getOperand(0).getOpcode() == ISD::SHL) {

    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
      return SDValue();

    // The shift masks must have the same position and size.
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      return SDValue();

    SDValue Shl = And1.getOperand(0);

    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      return SDValue();

    unsigned Shamt = CN->getZExtValue();

    // Return if the shift amount and the first bit position of the mask are
    // not the same.
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
      return SDValue();

    SDLoc DL(N);
    return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
                       DAG.getConstant(SMPos0, DL, MVT::i32),
                       DAG.getConstant(SMSize0, DL, MVT::i32),
                       And0.getOperand(0));
  } else {
    // Pattern match DINS.
    //   $dst = or (and $src, mask0), mask1
    //   where mask0 = ((1 << SMSize0) -1) << SMPos0
    //   => dins $dst, $src, pos, size
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // Check if the AND instruction has a constant as its argument.
      bool isConstCase = And1.getOpcode() != ISD::AND;
      if (And1.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
          return SDValue();
      } else {
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
          return SDValue();
      }
      // Don't generate INS if the constant OR operand doesn't fit into the
      // bits cleared by the constant AND operand.
      if (CN->getSExtValue() & CN1->getSExtValue())
        return SDValue();

      SDLoc DL(N);
      EVT ValTy = N->getOperand(0)->getValueType(0);
      SDValue Const1;
      SDValue SrlX;
      if (!isConstCase) {
        Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
        SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
      }
      return DAG.getNode(
          MipsISD::Ins, DL, N->getValueType(0),
          isConstCase
              ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
              : SrlX,
          DAG.getConstant(SMPos0, DL, MVT::i32),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
                                                        : SMSize0,
                          DL, MVT::i32),
          And0->getOperand(0));

    }
    return SDValue();
  }
}

static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
                                       const MipsSubtarget &Subtarget) {
  // ROOTNode must have a multiplication as an operand for the match to be
  // successful.
  if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
      ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
    return SDValue();

  // In the case where we have a multiplication as the left operand of
  // a subtraction, we can't combine into a MipsISD::MSub node as the
  // instruction definition of msub(u) places the multiplication on
  // the right.
  if (ROOTNode->getOpcode() == ISD::SUB &&
      ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
    return SDValue();

  // We don't handle vector types here.
  if (ROOTNode->getValueType(0).isVector())
    return SDValue();

  // For MIPS64, madd / msub instructions are inefficient to use with 64 bit
  // arithmetic. E.g.
  // (add (mul a b) c) =>
  //   let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
  //   MIPS64:   (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
  //   or
  //   MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
  //
  // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
  // problem is that Hi/Lo contain the upper and lower 32 bits of the
  // operand and result.
  //
  // It requires a chain of 4 add/mul for MIPS64R2 to get better code
  // density than doing it naively, 5 for MIPS64. Additionally, using
  // madd/msub on MIPS64 requires the operands actually be 32 bit sign
  // extended operands, not true 64 bit values.
  //
  // FIXME: For the moment, disable this completely for MIPS64.
  if (Subtarget.hasMips64())
    return SDValue();

  SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(0)
                     : ROOTNode->getOperand(1);

  SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(1)
                     : ROOTNode->getOperand(0);

  // Transform this to a MADD only if the user of this node is the add.
  // If there are other users of the mul, this function returns here.
  if (!Mult.hasOneUse())
    return SDValue();

  // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
  // must be in canonical form, i.e. sign extended. For MIPS32, the operands
  // of the multiply must have 32 or more sign bits, otherwise we cannot
  // perform this optimization. We have to check this here as we're performing
  // this optimization pre-legalization.
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);

  bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
                  MultRHS->getOpcode() == ISD::SIGN_EXTEND;
  bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
                    MultRHS->getOpcode() == ISD::ZERO_EXTEND;

  if (!IsSigned && !IsUnsigned)
    return SDValue();
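
  // At this point the candidate has the shape
  //   (add|sub $acc:i64, (mul (sext|zext $a), (sext|zext $b)))
  // and can be selected as madd(u)/msub(u) once $acc is split into the
  // HI/LO accumulator pair below.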
  // Initialize accumulator.
  SDLoc DL(ROOTNode);
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) =
      CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
  SDValue ACCIn =
      CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);

  // Create MipsMAdd(u) / MipsMSub(u) node.
  bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
  SDValue MAddOps[3] = {
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
  EVT VTs[2] = {MVT::i32, MVT::i32};
  SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);

  SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
  SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
  SDValue Combined =
      CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
  return Combined;
}

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  SDValue Add = N->getOperand(1);

  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Lo = Add.getOperand(1);

  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
    return SDValue();

  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
                             Add.getOperand(0));
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // Pattern match CINS.
  //   $dst = shl (and $src , imm), pos
  //   => cins $dst, $src, pos, size
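  //
  // For example, (shl (and $src, 0xffff), 16) has Pos == 16, SMPos == 0 and
  // SMSize == 16, and becomes (MipsISD::CIns $src, 16, 15), i.e.
  //   cins $dst, $src, 16, 15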

  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
    return SDValue();

  Pos = CN->getZExtValue();

  if (Pos >= ValTy.getSizeInBits())
    return SDValue();

  if (FirstOperandOpc != ISD::AND)
    return SDValue();

  // AND's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  NewOperand = FirstOperand.getOperand(0);
  // SMSize is 'location' (position) in this case, not size.
  SMSize--;

  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}

SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
    const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  default: break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return performDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, DCI, Subtarget);
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return performCMovFPCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:
    return performADDCombine(N, DAG, DCI, Subtarget);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

bool MipsTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
  // For MIPSR2 or later, we may be able to use the `ext` instruction or its
  // double-word variants.
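  // A position of at most 15 keeps the test mask (1 << Y) within ANDI's
  // 16-bit unsigned immediate; e.g. testing bit 3 of $x:
  //   andi $t, $x, 0x8; sltiu $dst, $t, 1   # $dst = (bit == 0)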
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

  return false;
}

bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");

  if (N->getOperand(0).getValueType().isVector())
    return false;
  return true;
}

void
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode())
  {
  case ISD::BRCOND:             return lowerBRCOND(Op, DAG);
  case ISD::ConstantPool:       return lowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return lowerBlockAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return lowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return lowerJumpTable(Op, DAG);
  case ISD::SELECT:             return lowerSELECT(Op, DAG);
  case ISD::SETCC:              return lowerSETCC(Op, DAG);
  case ISD::VASTART:            return lowerVASTART(Op, DAG);
  case ISD::VAARG:              return lowerVAARG(Op, DAG);
  case ISD::FCOPYSIGN:          return lowerFCOPYSIGN(Op, DAG);
  case ISD::FABS:               return lowerFABS(Op, DAG);
  case ISD::FRAMEADDR:          return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return lowerRETURNADDR(Op, DAG);
  case ISD::EH_RETURN:          return lowerEH_RETURN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return lowerATOMIC_FENCE(Op, DAG);
  case ISD::SHL_PARTS:          return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:          return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:          return lowerShiftRightParts(Op, DAG, false);
  case ISD::LOAD:               return lowerLOAD(Op, DAG);
  case ISD::STORE:              return lowerSTORE(Op, DAG);
  case ISD::EH_DWARF_CFA:       return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::FP_TO_SINT:         return lowerFP_TO_SINT(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// addLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
  Register VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  if (NoZeroDivCheck)
    return &MBB;

  // Insert instruction "teq $divisor_reg, $zero, 7".
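  // (Trap code 7 is the conventional MIPS break/trap code for an integer
  // divide-by-zero; the OS maps it to SIGFPE.)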
  MachineBasicBlock::iterator I(MI);
  MachineInstrBuilder MIB;
  MachineOperand &Divisor = MI.getOperand(2);
  MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
            .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
            .addReg(Mips::ZERO)
            .addImm(7);

  // Use the 32-bit sub-register if this is a 64-bit division.
  if (Is64Bit)
    MIB->getOperand(0).setSubReg(Mips::sub_32);

  // Clear Divisor's kill flag.
  Divisor.setIsKill(false);

  // We would normally delete the original instruction here but in this case
  // we only needed to inject an additional instruction rather than replace it.

  return &MBB;
}

MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::DIV:
  case Mips::DIVU:
  case Mips::MOD:
  case Mips::MODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::SDIV_MM:
  case Mips::UDIV_MM:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::DDIV:
  case Mips::DDIVU:
  case Mips::DMOD:
  case Mips::DMODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
  }
}

// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
MachineBasicBlock *
MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
                                     MachineBasicBlock *BB) const {

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown pseudo atomic for replacement!");
  }

  Register OldVal = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();
  Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));

  MachineBasicBlock::iterator II(MI);

  // The scratch register here, marked with the EarlyClobber | Define |
  // Implicit flags, is used to persuade the register allocator and the
  // machine verifier to accept the usage of this register. It has to be a
  // real register which holds an UNDEF value but is dead after the
  // instruction, and which is unique among the registers chosen for the
  // instruction.

  // The EarlyClobber flag has the semantic properties that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to convince the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  // The implicit flag is here due to the interaction between the other flags
  // and the machine verifier.

  // For correctness purposes, a new pseudo is introduced here. We need this
  // new pseudo, so that FastRegisterAllocator does not see an ll/sc sequence
  // that is spread over more than one basic block. A register allocator which
  // introduces a store (or any codegen, in fact) can violate the expectations
  // of the hardware.
  //
  // An atomic read-modify-write sequence starts with a linked load
  // instruction and ends with a store conditional instruction. The atomic
  // read-modify-write sequence fails if any of the following conditions
  // occur between the execution of ll and sc:
  //   * A coherent store is completed by another process or coherent I/O
  //     module into the block of synchronizable physical memory containing
  //     the word. The size and alignment of the block is
  //     implementation-dependent.
  //   * A coherent store is executed between an LL and SC sequence on the
  //     same processor to the block of synchronizable physical memory
  //     containing the word.
  //
  Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
  Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);

  MachineInstrBuilder MIB =
      BuildMI(*BB, II, DL, TII->get(AtomicOp))
          .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
          .addReg(PtrCopy)
          .addReg(IncrCopy)
          .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
                               RegState::Implicit | RegState::Dead);
  if (NeedsAdditionalReg) {
    Register Scratch2 =
        RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
    MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
                             RegState::Implicit | RegState::Dead);
  }

  MI.eraseFromParent();

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
    unsigned SrcReg) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  if (Subtarget.hasMips32r2() && Size == 1) {
    BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
    return BB;
  }

  if (Subtarget.hasMips32r2() && Size == 2) {
    BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  Register ScrReg = RegInfo.createVirtualRegister(RC);

  assert(Size < 32);
  int64_t ShiftImm = 32 - (Size * 8);

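  // e.g. for Size == 1: sll $scr, $src, 24 followed by sra $dst, $scr, 24.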
  BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
  BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for EmitAtomicBinaryPartial.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
      getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register Incr2 = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);
  Register Scratch3 = RegInfo.createVirtualRegister(RC);

  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown subword atomic pseudo for expansion!");
  }

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB->getIterator();
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(exitMBB, BranchProbability::getOne());

  // thisMBB:
  //   addiu masklsb2,$0,-4            # 0xfffffffc
  //   and   alignedaddr,ptr,masklsb2
  //   andi  ptrlsb2,ptr,3
  //   sll   shiftamt,ptrlsb2,3
  //   ori   maskupper,$0,255          # 0xff
  //   sll   mask,maskupper,shiftamt
  //   nor   mask2,$0,mask
  //   sll   incr2,incr,shiftamt

  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
      .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
      .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
      .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII->get(Mips::XORi), Off)
        .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
  BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
      .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
      .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);

1835
1836 // The purposes of the flags on the scratch registers are explained in
1837 // emitAtomicBinary. In summary, we need a scratch register which is going to
1838 // be undef, that is unique among registers chosen for the instruction.
1839
1840 MachineInstrBuilder MIB =
1841 BuildMI(BB, DL, TII->get(AtomicOp))
1842 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1843 .addReg(AlignedAddr)
1844 .addReg(Incr2)
1845 .addReg(Mask)
1846 .addReg(Mask2)
1847 .addReg(ShiftAmt)
1848 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1849 RegState::Dead | RegState::Implicit)
1850 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
1851 RegState::Dead | RegState::Implicit)
1852 .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
1853 RegState::Dead | RegState::Implicit);
1854 if (NeedsAdditionalReg) {
1855 Register Scratch4 = RegInfo.createVirtualRegister(RC);
1856 MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
1857 RegState::Dead | RegState::Implicit);
1858 }
1859
1860 MI.eraseFromParent(); // The instruction is gone now.
1861
1862 return exitMBB;
1863}
1864
1865// Lower atomic compare and swap to a pseudo instruction, taking care to
1866// define a scratch register for the pseudo instruction's expansion. The
1867 // instruction is expanded after the register allocator so as to prevent
1868// the insertion of stores between the linked load and the store conditional.
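// (A store inserted between the ll and the sc can clear the link bit, which
// would make the store-conditional fail and retry on every iteration.)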
1869
1870 MachineBasicBlock *
1871 MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1872 MachineBasicBlock *BB) const {
1873
1874 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1875 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1876 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1877
1878 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1879
1880 MachineFunction *MF = BB->getParent();
1881 MachineRegisterInfo &MRI = MF->getRegInfo();
1882 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1883 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1884 DebugLoc DL = MI.getDebugLoc();
1885
1886 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1887 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1888 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1889 Register Dest = MI.getOperand(0).getReg();
1890 Register Ptr = MI.getOperand(1).getReg();
1891 Register OldVal = MI.getOperand(2).getReg();
1892 Register NewVal = MI.getOperand(3).getReg();
1893
1894 Register Scratch = MRI.createVirtualRegister(RC);
1895 MachineBasicBlock::iterator II(MI);
1896
1897 // We need to create copies of the various registers and kill them at the
1898 // atomic pseudo. If the copies are not made, when the atomic is expanded
1899 // after fast register allocation, the spills will end up outside of the
1900 // blocks that their values are defined in, causing livein errors.
1901
1902 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1903 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1904 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1905
1906 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1907 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1908 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
1909
1910 // The purposes of the flags on the scratch registers are explained in
1911 // emitAtomicBinary. In summary, we need a scratch register which is going to
1912 // be undef, that is unique among registers chosen for the instruction.
1913
1914 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1915 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1916 .addReg(PtrCopy, RegState::Kill)
1917 .addReg(OldValCopy, RegState::Kill)
1918 .addReg(NewValCopy, RegState::Kill)
1919 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1920 RegState::Dead | RegState::Implicit);
1921
1922 MI.eraseFromParent(); // The instruction is gone now.
1923
1924 return BB;
1925}
1926
1927MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1928 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1929 assert((Size == 1 || Size == 2) &&
1930 "Unsupported size for EmitAtomicCmpSwapPartial.");
1931
1932 MachineFunction *MF = BB->getParent();
1933 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1934 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1935 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1936 const TargetRegisterClass *RCp =
1937 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1938 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1939 DebugLoc DL = MI.getDebugLoc();
1940
1941 Register Dest = MI.getOperand(0).getReg();
1942 Register Ptr = MI.getOperand(1).getReg();
1943 Register CmpVal = MI.getOperand(2).getReg();
1944 Register NewVal = MI.getOperand(3).getReg();
1945
1946 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1947 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1948 Register Mask = RegInfo.createVirtualRegister(RC);
1949 Register Mask2 = RegInfo.createVirtualRegister(RC);
1950 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1951 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1952 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1953 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1954 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1955 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1956 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
1957 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1958 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1959 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1960
1961 // The scratch registers here with the EarlyClobber | Define | Dead | Implicit
1962 // flags are used to coerce the register allocator and the machine verifier to
1963 // accept the usage of these registers.
1964 // The EarlyClobber flag has the semantic properties that the operand it is
1965 // attached to is clobbered before the rest of the inputs are read. Hence it
1966 // must be unique among the operands to the instruction.
1967 // The Define flag is needed to convince the machine verifier that an Undef
1968 // value isn't a problem.
1969 // The Dead flag is needed as the value in scratch isn't used by any other
1970 // instruction. Kill isn't used as Dead is more precise.
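// (Roughly: each scratch ends up as something like an implicit-def'd, dead,
// early-clobber operand on the POSTRA pseudo, as spelled out above.)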
1971 Register Scratch = RegInfo.createVirtualRegister(RC);
1972 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1973
1974 // insert new blocks after the current block
1975 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1976 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1977 MachineFunction::iterator It = ++BB->getIterator();
1978 MF->insert(It, exitMBB);
1979
1980 // Transfer the remainder of BB and its successor edges to exitMBB.
1981 exitMBB->splice(exitMBB->begin(), BB,
1982 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1983 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1984
1985 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1986
1987 // thisMBB:
1988 // addiu masklsb2,$0,-4 # 0xfffffffc
1989 // and alignedaddr,ptr,masklsb2
1990 // andi ptrlsb2,ptr,3
1991 // xori ptrlsb2,ptrlsb2,3 # Only for BE
1992 // sll shiftamt,ptrlsb2,3
1993 // ori maskupper,$0,255 # 0xff
1994 // sll mask,maskupper,shiftamt
1995 // nor mask2,$0,mask
1996 // andi maskedcmpval,cmpval,255
1997 // sll shiftedcmpval,maskedcmpval,shiftamt
1998 // andi maskednewval,newval,255
1999 // sll shiftednewval,maskednewval,shiftamt
2000 int64_t MaskImm = (Size == 1) ? 255 : 65535;
2001 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
2002 .addReg(ABI.GetNullPtr()).addImm(-4);
2003 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
2004 .addReg(Ptr).addReg(MaskLSB2);
2005 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
2006 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
2007 if (Subtarget.isLittle()) {
2008 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
2009 } else {
2010 Register Off = RegInfo.createVirtualRegister(RC);
2011 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
2012 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
2013 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
2014 }
2015 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
2016 .addReg(Mips::ZERO).addImm(MaskImm);
2017 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
2018 .addReg(MaskUpper).addReg(ShiftAmt);
2019 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
2020 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
2021 .addReg(CmpVal).addImm(MaskImm);
2022 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
2023 .addReg(MaskedCmpVal).addReg(ShiftAmt);
2024 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
2025 .addReg(NewVal).addImm(MaskImm);
2026 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
2027 .addReg(MaskedNewVal).addReg(ShiftAmt);
2028
2029 // The purposes of the flags on the scratch registers are explained in
2030 // emitAtomicBinary. In summary, we need a scratch register which is going to
2031 // be undef, that is unique among the registers chosen for the instruction.
2032
2033 BuildMI(BB, DL, TII->get(AtomicOp))
2034 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
2035 .addReg(AlignedAddr)
2036 .addReg(Mask)
2037 .addReg(ShiftedCmpVal)
2038 .addReg(Mask2)
2039 .addReg(ShiftedNewVal)
2040 .addReg(ShiftAmt)
2041 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
2042 RegState::Dead | RegState::Implicit)
2043 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
2044 RegState::Dead | RegState::Implicit);
2045
2046 MI.eraseFromParent(); // The instruction is gone now.
2047
2048 return exitMBB;
2049}
2050
2051SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2052 // The first operand is the chain, the second is the condition, the third is
2053 // the block to branch to if the condition is true.
2054 SDValue Chain = Op.getOperand(0);
2055 SDValue Dest = Op.getOperand(2);
2056 SDLoc DL(Op);
2057
2058 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2059 SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
2060
2061 // Return if flag is not set by a floating point comparison.
2062 if (CondRes.getOpcode() != MipsISD::FPCmp)
2063 return Op;
2064
2065 SDValue CCNode = CondRes.getOperand(2);
2066 Mips::CondCode CC = (Mips::CondCode)CCNode->getAsZExtVal();
2067 unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
2068 SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
2069 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
2070 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
2071 FCC0, Dest, CondRes);
2072}
2073
2074SDValue MipsTargetLowering::
2075lowerSELECT(SDValue Op, SelectionDAG &DAG) const
2076{
2077 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2078 SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
2079
2080 // Return if flag is not set by a floating point comparison.
2081 if (Cond.getOpcode() != MipsISD::FPCmp)
2082 return Op;
2083
2084 return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
2085 SDLoc(Op));
2086}
2087
2088SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2089 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2090 SDValue Cond = createFPCmp(DAG, Op);
2091
2092 assert(Cond.getOpcode() == MipsISD::FPCmp &&
2093 "Floating point operand expected.");
2094
2095 SDLoc DL(Op);
2096 SDValue True = DAG.getConstant(1, DL, MVT::i32);
2097 SDValue False = DAG.getConstant(0, DL, MVT::i32);
2098
2099 return createCMovFP(DAG, Cond, True, False, DL);
2100}
2101
2102SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
2103 SelectionDAG &DAG) const {
2104 EVT Ty = Op.getValueType();
2105 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2106 const GlobalValue *GV = N->getGlobal();
2107
2108 if (!isPositionIndependent()) {
2109 const MipsTargetObjectFile *TLOF =
2110 static_cast<const MipsTargetObjectFile *>(
2111 getTargetMachine().getObjFileLowering());
2112 const GlobalObject *GO = GV->getAliaseeObject();
2113 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
2114 // %gp_rel relocation
2115 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2116
2117 // %hi/%lo relocation
2118 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2119 // %highest/%higher/%hi/%lo relocation
2120 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2121 }
2122
2123 // Every other architecture would use shouldAssumeDSOLocal in here, but
2124 // mips is special.
2125 // * In PIC code mips requires got loads even for local statics!
2126 // * To save on got entries, for local statics the got entry contains the
2127 // page and an additional add instruction takes care of the low bits.
2128 // * It is legal to access a hidden symbol with a non-hidden undefined
2129 // reference, so one cannot guarantee that all accesses to a hidden symbol
2130 // will know it is hidden.
2131 // * Mips linkers don't support creating a page and a full got entry for
2132 // the same symbol.
2133 // * Given all that, we have to use a full got entry for hidden symbols :-(
2134 if (GV->hasLocalLinkage())
2135 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2136
2137 if (Subtarget.useXGOT())
2138 return getAddrGlobalLargeGOT(
2139 N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
2140 DAG.getEntryNode(),
2141 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2142
2143 return getAddrGlobal(
2144 N, SDLoc(N), Ty, DAG,
2145 (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
2146 DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2147}
2148
2149SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2150 SelectionDAG &DAG) const {
2151 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2152 EVT Ty = Op.getValueType();
2153
2154 if (!isPositionIndependent())
2155 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2156 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2157
2158 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2159}
2160
2161SDValue MipsTargetLowering::
2162lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
2163{
2164 // If the relocation model is PIC, use the General Dynamic TLS Model or
2165 // Local Dynamic TLS model, otherwise use the Initial Exec or
2166 // Local Exec TLS Model.
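// For example, a PIC access to a local thread_local variable takes the
// Local Dynamic path below: one __tls_get_addr call yields the module base,
// and a DTPREL hi/lo pair then adds the variable's offset within the module.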
2167
2168 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2169 if (DAG.getTarget().useEmulatedTLS())
2170 return LowerToTLSEmulatedModel(GA, DAG);
2171
2172 SDLoc DL(GA);
2173 const GlobalValue *GV = GA->getGlobal();
2174 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2175
2176 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2177
2178 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2179 // General Dynamic and Local Dynamic TLS Model.
2180 unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
2181 : MipsII::MO_TLSGD;
2182
2183 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
2184 SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
2185 getGlobalReg(DAG, PtrVT), TGA);
2186 unsigned PtrSize = PtrVT.getSizeInBits();
2187 IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
2188
2189 SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
2190
2191 ArgListTy Args;
2192 ArgListEntry Entry;
2193 Entry.Node = Argument;
2194 Entry.Ty = PtrTy;
2195 Args.push_back(Entry);
2196
2197 TargetLowering::CallLoweringInfo CLI(DAG);
2198 CLI.setDebugLoc(DL)
2199 .setChain(DAG.getEntryNode())
2200 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
2201 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2202
2203 SDValue Ret = CallResult.first;
2204
2205 if (model != TLSModel::LocalDynamic)
2206 return Ret;
2207
2208 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2209 MipsII::MO_DTPREL_HI);
2210 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2211 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2212 MipsII::MO_DTPREL_LO);
2213 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2214 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
2215 return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
2216 }
2217
2218 SDValue Offset;
2219 if (model == TLSModel::InitialExec) {
2220 // Initial Exec TLS Model
2221 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2222 MipsII::MO_GOTTPREL);
2223 TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
2224 TGA);
2225 Offset =
2226 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
2227 } else {
2228 // Local Exec TLS Model
2229 assert(model == TLSModel::LocalExec);
2230 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2231 MipsII::MO_TPREL_HI);
2232 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2233 MipsII::MO_TPREL_LO);
2234 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2235 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2236 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2237 }
2238
2239 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
2240 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
2241}
2242
2243SDValue MipsTargetLowering::
2244lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2245{
2246 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2247 EVT Ty = Op.getValueType();
2248
2249 if (!isPositionIndependent())
2250 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2251 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2252
2253 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2254}
2255
2256SDValue MipsTargetLowering::
2257lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
2258{
2259 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2260 EVT Ty = Op.getValueType();
2261
2262 if (!isPositionIndependent()) {
2263 const MipsTargetObjectFile *TLOF =
2264 static_cast<const MipsTargetObjectFile *>(
2265 getTargetMachine().getObjFileLowering());
2266
2267 if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
2268 getTargetMachine()))
2269 // %gp_rel relocation
2270 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2271
2272 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2273 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2274 }
2275
2276 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2277}
2278
2279SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2280 MachineFunction &MF = DAG.getMachineFunction();
2281 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
2282
2283 SDLoc DL(Op);
2284 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2285 getPointerTy(MF.getDataLayout()));
2286
2287 // vastart just stores the address of the VarArgsFrameIndex slot into the
2288 // memory location argument.
2289 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2290 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2291 MachinePointerInfo(SV));
2292}
2293
2294SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2295 SDNode *Node = Op.getNode();
2296 EVT VT = Node->getValueType(0);
2297 SDValue Chain = Node->getOperand(0);
2298 SDValue VAListPtr = Node->getOperand(1);
2299 const Align Align =
2300 llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
2301 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2302 SDLoc DL(Node);
2303 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
2304
2305 SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
2306 VAListPtr, MachinePointerInfo(SV));
2307 SDValue VAList = VAListLoad;
2308
2309 // Re-align the pointer if necessary.
2310 // It should only ever be necessary for 64-bit types on O32 since the minimum
2311 // argument alignment is the same as the maximum type alignment for N32/N64.
2312 //
2313 // FIXME: We currently align too often. The code generator doesn't notice
2314 // when the pointer is still aligned from the last va_arg (or pair of
2315 // va_args for the i64 on O32 case).
2316 if (Align > getMinStackArgumentAlignment()) {
2317 VAList = DAG.getNode(
2318 ISD::ADD, DL, VAList.getValueType(), VAList,
2319 DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));
2320
2321 VAList = DAG.getNode(
2322 ISD::AND, DL, VAList.getValueType(), VAList,
2323 DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
2324 }
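// The add/and pair above is the usual round-up-to-alignment idiom:
// VAList = (VAList + Align - 1) & -Align.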
2325
2326 // Increment the pointer, VAList, to the next vaarg.
2327 auto &TD = DAG.getDataLayout();
2328 unsigned ArgSizeInBytes =
2329 TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
2330 SDValue Tmp3 =
2331 DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
2332 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
2333 DL, VAList.getValueType()));
2334 // Store the incremented VAList to the legalized pointer
2335 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
2336 MachinePointerInfo(SV));
2337
2338 // In big-endian mode we must adjust the pointer when the load size is smaller
2339 // than the argument slot size. We must also reduce the known alignment to
2340 // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
2341 // the correct half of the slot, and reduce the alignment from 8 (slot
2342 // alignment) down to 4 (type alignment).
2343 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
2344 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
2345 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
2346 DAG.getIntPtrConstant(Adjustment, DL));
2347 }
2348 // Load the actual argument out of the pointer VAList
2349 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
2350}
2351
2352 static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
2353 bool HasExtractInsert) {
2354 EVT TyX = Op.getOperand(0).getValueType();
2355 EVT TyY = Op.getOperand(1).getValueType();
2356 SDLoc DL(Op);
2357 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2358 SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
2359 SDValue Res;
2360
2361 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2362 // to i32.
2363 SDValue X = (TyX == MVT::f32) ?
2364 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
2365 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2366 Const1);
2367 SDValue Y = (TyY == MVT::f32) ?
2368 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
2369 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
2370 Const1);
2371
2372 if (HasExtractInsert) {
2373 // ext E, Y, 31, 1 ; extract bit31 of Y
2374 // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
2375 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
2376 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
2377 } else {
2378 // sll SllX, X, 1
2379 // srl SrlX, SllX, 1
2380 // srl SrlY, Y, 31
2381 // sll SllY, SrlX, 31
2382 // or Or, SrlX, SllY
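// i.e. Res = (X & 0x7fffffff) | (Y & 0x80000000), written with shifts
// because cores without ext/ins take this path.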
2383 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2384 SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2385 SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
2386 SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
2387 Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
2388 }
2389
2390 if (TyX == MVT::f32)
2391 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
2392
2393 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2394 Op.getOperand(0),
2395 DAG.getConstant(0, DL, MVT::i32));
2396 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2397}
2398
2399 static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
2400 bool HasExtractInsert) {
2401 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
2402 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
2403 EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
2404 SDLoc DL(Op);
2405 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2406
2407 // Bitcast to integer nodes.
2408 SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
2409 SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
2410
2411 if (HasExtractInsert) {
2412 // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
2413 // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
2414 SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
2415 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
2416
2417 if (WidthX > WidthY)
2418 E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
2419 else if (WidthY > WidthX)
2420 E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
2421
2422 SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
2423 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
2424 X);
2425 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
2426 }
2427
2428 // (d)sll SllX, X, 1
2429 // (d)srl SrlX, SllX, 1
2430 // (d)srl SrlY, Y, width(Y)-1
2431 // (d)sll SllY, SrlX, width(Y)-1
2432 // or Or, SrlX, SllY
2433 SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
2434 SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
2435 SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
2436 DAG.getConstant(WidthY - 1, DL, MVT::i32));
2437
2438 if (WidthX > WidthY)
2439 SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
2440 else if (WidthY > WidthX)
2441 SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
2442
2443 SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
2444 DAG.getConstant(WidthX - 1, DL, MVT::i32));
2445 SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
2446 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
2447}
2448
2449SDValue
2450MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2451 if (Subtarget.isGP64bit())
2452 return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
2453
2454 return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
2455 }
2456
2457SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
2458 bool HasExtractInsert) const {
2459 SDLoc DL(Op);
2460 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2461
2462 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2463 return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2464
2465 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2466 // to i32.
2467 SDValue X = (Op.getValueType() == MVT::f32)
2468 ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
2469 : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2470 Op.getOperand(0), Const1);
2471
2472 // Clear MSB.
2473 if (HasExtractInsert)
2474 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2475 DAG.getRegister(Mips::ZERO, MVT::i32),
2476 DAG.getConstant(31, DL, MVT::i32), Const1, X);
2477 else {
2478 // TODO: Provide DAG patterns which transform (and x, cst)
2479 // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
2480 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2481 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2482 }
2483
2484 if (Op.getValueType() == MVT::f32)
2485 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
2486
2487 // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
2488 // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
2489 // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
2490 // place.
2491 SDValue LowX =
2492 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2493 DAG.getConstant(0, DL, MVT::i32));
2494 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2495}
2496
2497SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
2498 bool HasExtractInsert) const {
2499 SDLoc DL(Op);
2500 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2501
2502 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2503 return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2504
2505 // Bitcast to integer node.
2506 SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
2507
2508 // Clear MSB.
2509 if (HasExtractInsert)
2510 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2511 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2512 DAG.getConstant(63, DL, MVT::i32), Const1, X);
2513 else {
2514 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
2515 Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
2516 }
2517
2518 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
2519}
2520
2521SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2522 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2523 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2524
2525 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2526}
2527
2528SDValue MipsTargetLowering::
2529lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2530 // check the depth
2531 if (Op.getConstantOperandVal(0) != 0) {
2532 DAG.getContext()->emitError(
2533 "frame address can be determined only for current frame");
2534 return SDValue();
2535 }
2536
2537 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2538 MFI.setFrameAddressIsTaken(true);
2539 EVT VT = Op.getValueType();
2540 SDLoc DL(Op);
2541 SDValue FrameAddr = DAG.getCopyFromReg(
2542 DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
2543 return FrameAddr;
2544}
2545
2546SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
2547 SelectionDAG &DAG) const {
2548 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2549 return SDValue();
2550
2551 // check the depth
2552 if (Op.getConstantOperandVal(0) != 0) {
2553 DAG.getContext()->emitError(
2554 "return address can be determined only for current frame");
2555 return SDValue();
2556 }
2557
2558 MachineFunction &MF = DAG.getMachineFunction();
2559 MachineFrameInfo &MFI = MF.getFrameInfo();
2560 MVT VT = Op.getSimpleValueType();
2561 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
2562 MFI.setReturnAddressIsTaken(true);
2563
2564 // Return RA, which contains the return address. Mark it an implicit live-in.
2565 Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
2566 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
2567}
2568
2569// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
2570// generated from __builtin_eh_return (offset, handler)
2571// The effect of this is to adjust the stack pointer by "offset"
2572// and then branch to "handler".
2573SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
2574 const {
2575 MachineFunction &MF = DAG.getMachineFunction();
2576 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2577
2578 MipsFI->setCallsEhReturn();
2579 SDValue Chain = Op.getOperand(0);
2580 SDValue Offset = Op.getOperand(1);
2581 SDValue Handler = Op.getOperand(2);
2582 SDLoc DL(Op);
2583 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
2584
2585 // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
2586 // EH_RETURN nodes, so that instructions are emitted back-to-back.
2587 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
2588 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
2589 Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
2590 Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
2591 return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
2592 DAG.getRegister(OffsetReg, Ty),
2593 DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
2594 Chain.getValue(1));
2595}
2596
2597SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2598 SelectionDAG &DAG) const {
2599 // FIXME: Need pseudo-fence for 'singlethread' fences
2600 // FIXME: Set SType for weaker fences where supported/appropriate.
2601 unsigned SType = 0;
2602 SDLoc DL(Op);
2603 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2604 DAG.getConstant(SType, DL, MVT::i32));
2605}
2606
2607SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2608 SelectionDAG &DAG) const {
2609 SDLoc DL(Op);
2610 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2611
2612 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2613 SDValue Shamt = Op.getOperand(2);
2614 // if shamt < (VT.bits):
2615 // lo = (shl lo, shamt)
2616 // hi = (or (shl hi, shamt) (srl (srl lo, 1), (xor shamt, (VT.bits-1))))
2617 // else:
2618 // lo = 0
2619 // hi = (shl lo, shamt[4:0])
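// For example, a 64-bit shift left by 40 on a 32-bit target takes the else
// branch: lo becomes 0 and hi becomes lo << 8 (40 & 31).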
2620 SDValue Not =
2621 DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2622 DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
2623 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2624 DAG.getConstant(1, DL, VT));
2625 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2626 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2627 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2628 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2629 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2630 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2631 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2632 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2633 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2634
2635 SDValue Ops[2] = {Lo, Hi};
2636 return DAG.getMergeValues(Ops, DL);
2637}
2638
2639SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2640 bool IsSRA) const {
2641 SDLoc DL(Op);
2642 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2643 SDValue Shamt = Op.getOperand(2);
2644 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2645
2646 // if shamt < (VT.bits):
2647 // lo = (or (shl (shl hi, 1), (xor shamt, (VT.bits-1))) (srl lo, shamt))
2648 // if isSRA:
2649 // hi = (sra hi, shamt)
2650 // else:
2651 // hi = (srl hi, shamt)
2652 // else:
2653 // if isSRA:
2654 // lo = (sra hi, shamt[4:0])
2655 // hi = (sra hi, 31)
2656 // else:
2657 // lo = (srl hi, shamt[4:0])
2658 // hi = 0
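// For example, an arithmetic shift right by 40 takes the else branch:
// lo becomes hi >> 8 (40 & 31) and hi is filled with hi's sign bit.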
2659 SDValue Not =
2660 DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2661 DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
2662 SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
2663 DAG.getConstant(1, DL, VT));
2664 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
2665 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2666 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2667 SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
2668 DL, VT, Hi, Shamt);
2669 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2670 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2671 SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
2672 DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
2673
2674 if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
2675 SDVTList VTList = DAG.getVTList(VT, VT);
2676 return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
2677 : Mips::PseudoD_SELECT_I,
2678 DL, VTList, Cond, ShiftRightHi,
2679 IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
2680 ShiftRightHi);
2681 }
2682
2683 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
2684 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2685 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
2686
2687 SDValue Ops[2] = {Lo, Hi};
2688 return DAG.getMergeValues(Ops, DL);
2689}
2690
2691static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2692 SDValue Chain, SDValue Src, unsigned Offset) {
2693 SDValue Ptr = LD->getBasePtr();
2694 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2695 EVT BasePtrVT = Ptr.getValueType();
2696 SDLoc DL(LD);
2697 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2698
2699 if (Offset)
2700 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2701 DAG.getConstant(Offset, DL, BasePtrVT));
2702
2703 SDValue Ops[] = { Chain, Ptr, Src };
2704 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2705 LD->getMemOperand());
2706}
2707
2708// Expand an unaligned 32 or 64-bit integer load node.
2709 SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2710 LoadSDNode *LD = cast<LoadSDNode>(Op);
2711 EVT MemVT = LD->getMemoryVT();
2712
2713 if (Subtarget.systemSupportsUnalignedAccess())
2714 return Op;
2715
2716 // Return if load is aligned or if MemVT is neither i32 nor i64.
2717 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
2718 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2719 return SDValue();
2720
2721 bool IsLittle = Subtarget.isLittle();
2722 EVT VT = Op.getValueType();
2723 ISD::LoadExtType ExtType = LD->getExtensionType();
2724 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2725
2726 assert((VT == MVT::i32) || (VT == MVT::i64));
2727
2728 // Expand
2729 // (set dst, (i64 (load baseptr)))
2730 // to
2731 // (set tmp, (ldl (add baseptr, 7), undef))
2732 // (set dst, (ldr baseptr, tmp))
2733 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2734 SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
2735 IsLittle ? 7 : 0);
2736 return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
2737 IsLittle ? 0 : 7);
2738 }
2739
2740 SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
2741 IsLittle ? 3 : 0);
2742 SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
2743 IsLittle ? 0 : 3);
2744
2745 // Expand
2746 // (set dst, (i32 (load baseptr))) or
2747 // (set dst, (i64 (sextload baseptr))) or
2748 // (set dst, (i64 (extload baseptr)))
2749 // to
2750 // (set tmp, (lwl (add baseptr, 3), undef))
2751 // (set dst, (lwr baseptr, tmp))
2752 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2753 (ExtType == ISD::EXTLOAD))
2754 return LWR;
2755
2756 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2757
2758 // Expand
2759 // (set dst, (i64 (zextload baseptr)))
2760 // to
2761 // (set tmp0, (lwl (add baseptr, 3), undef))
2762 // (set tmp1, (lwr baseptr, tmp0))
2763 // (set tmp2, (shl tmp1, 32))
2764 // (set dst, (srl tmp2, 32))
2765 SDLoc DL(LD);
2766 SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
2767 SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
2768 SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
2769 SDValue Ops[] = { SRL, LWR.getValue(1) };
2770 return DAG.getMergeValues(Ops, DL);
2771}
2772
2773static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2774 SDValue Chain, unsigned Offset) {
2775 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2776 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2777 SDLoc DL(SD);
2778 SDVTList VTList = DAG.getVTList(MVT::Other);
2779
2780 if (Offset)
2781 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2782 DAG.getConstant(Offset, DL, BasePtrVT));
2783
2784 SDValue Ops[] = { Chain, Value, Ptr };
2785 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2786 SD->getMemOperand());
2787}
2788
2789// Expand an unaligned 32 or 64-bit integer store node.
2790 static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2791 bool IsLittle) {
2792 SDValue Value = SD->getValue(), Chain = SD->getChain();
2793 EVT VT = Value.getValueType();
2794
2795 // Expand
2796 // (store val, baseptr) or
2797 // (truncstore val, baseptr)
2798 // to
2799 // (swl val, (add baseptr, 3))
2800 // (swr val, baseptr)
2801 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2802 SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
2803 IsLittle ? 3 : 0);
2804 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2805 }
2806
2807 assert(VT == MVT::i64);
2808
2809 // Expand
2810 // (store val, baseptr)
2811 // to
2812 // (sdl val, (add baseptr, 7))
2813 // (sdr val, baseptr)
2814 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2815 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2816}
2817
2818// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
2819 static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2820 bool SingleFloat) {
2821 SDValue Val = SD->getValue();
2822
2823 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2824 (Val.getValueSizeInBits() > 32 && SingleFloat))
2825 return SDValue();
2826
2827 EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
2828 SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
2829 Val.getOperand(0));
2830 return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
2831 SD->getPointerInfo(), SD->getAlign(),
2832 SD->getMemOperand()->getFlags());
2833}
2834
2835 SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2836 StoreSDNode *SD = cast<StoreSDNode>(Op);
2837 EVT MemVT = SD->getMemoryVT();
2838
2839 // Lower unaligned integer stores.
2840 if (!Subtarget.systemSupportsUnalignedAccess() &&
2841 (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
2842 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2843 return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
2844
2845 return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat());
2846 }
2847
2848SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2849 SelectionDAG &DAG) const {
2850
2851 // Return a fixed StackObject with offset 0 which points to the old stack
2852 // pointer.
2853 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2854 EVT ValTy = Op->getValueType(0);
2855 int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
2856 return DAG.getFrameIndex(FI, ValTy);
2857}
2858
2859SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2860 SelectionDAG &DAG) const {
2861 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2862 return SDValue();
2863
2864 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2865 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2866 Op.getOperand(0));
2867 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2868}
2869
2870//===----------------------------------------------------------------------===//
2871// Calling Convention Implementation
2872//===----------------------------------------------------------------------===//
2873
2874//===----------------------------------------------------------------------===//
2875// TODO: Implement a generic logic using tblgen that can support this.
2876// Mips O32 ABI rules:
2877// ---
2878// i32 - Passed in A0, A1, A2, A3 and stack
2879// f32 - Only passed in f32 registers if no int reg has been used yet to hold
2880// an argument. Otherwise, passed in A1, A2, A3 and stack.
2881// f64 - Only passed in two aliased f32 registers if no int reg has been used
2882// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2883// not used, it must be shadowed. If only A3 is available, shadow it and
2884// go to stack.
2885// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2886// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2887// with the remainder spilled to the stack.
2888// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2889// spilling the remainder to the stack.
2890//
2891// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2892//===----------------------------------------------------------------------===//
2893
2894static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2895 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2896 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2897 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2899
2900 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2901
2902 const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
2903
2904 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2905
2906 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2907
2908 // Do not process byval args here.
2909 if (ArgFlags.isByVal())
2910 return true;
2911
2912 // Promote i8 and i16
2913 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2914 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2915 LocVT = MVT::i32;
2916 if (ArgFlags.isSExt())
2917 LocInfo = CCValAssign::SExtUpper;
2918 else if (ArgFlags.isZExt())
2919 LocInfo = CCValAssign::ZExtUpper;
2920 else
2921 LocInfo = CCValAssign::AExtUpper;
2922 }
2923 }
2924
2925 // Promote i8 and i16
2926 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2927 LocVT = MVT::i32;
2928 if (ArgFlags.isSExt())
2929 LocInfo = CCValAssign::SExt;
2930 else if (ArgFlags.isZExt())
2931 LocInfo = CCValAssign::ZExt;
2932 else
2933 LocInfo = CCValAssign::AExt;
2934 }
2935
2936 unsigned Reg;
2937
2938 // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
2939 // is true: the function is vararg, the argument is the 3rd or higher, or
2940 // there is a previous argument which is not f32 or f64.
2941 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2942 State.getFirstUnallocated(F32Regs) != ValNo;
2943 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2944 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2945 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2946
2947 // The MIPS vector ABI for floats passes them in a pair of registers
2948 if (ValVT == MVT::i32 && isVectorFloat) {
2949 // This is the start of a vector that was scalarized into an unknown number
2950 // of components. It doesn't matter how many there are. Allocate one of the
2951 // notional 8 byte aligned registers which map onto the argument stack, and
2952 // shadow the register lost to alignment requirements.
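// For example, if this piece lands in A2, A1 is consumed as padding so the
// pair stays 8-byte aligned; once A0 and A2 are gone, A3 is consumed and the
// piece goes to the stack.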
2953 if (ArgFlags.isSplit()) {
2954 Reg = State.AllocateReg(FloatVectorIntRegs);
2955 if (Reg == Mips::A2)
2956 State.AllocateReg(Mips::A1);
2957 else if (Reg == 0)
2958 State.AllocateReg(Mips::A3);
2959 } else {
2960 // If we're an intermediate component of the split, we can just attempt to
2961 // allocate a register directly.
2962 Reg = State.AllocateReg(IntRegs);
2963 }
2964 } else if (ValVT == MVT::i32 ||
2965 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2966 Reg = State.AllocateReg(IntRegs);
2967 // If this is the first part of an i64 arg,
2968 // the allocated register must be either A0 or A2.
2969 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2970 Reg = State.AllocateReg(IntRegs);
2971 LocVT = MVT::i32;
2972 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2973 // Allocate int register and shadow next int register. If first
2974 // available register is Mips::A1 or Mips::A3, shadow it too.
2975 Reg = State.AllocateReg(IntRegs);
2976 if (Reg == Mips::A1 || Reg == Mips::A3)
2977 Reg = State.AllocateReg(IntRegs);
2978
2979 if (Reg) {
2980 LocVT = MVT::i32;
2981
2982 State.addLoc(
2983 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2984 MCRegister HiReg = State.AllocateReg(IntRegs);
2985 assert(HiReg);
2986 State.addLoc(
2987 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
2988 return false;
2989 }
2990 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2991 // we are guaranteed to find an available float register
2992 if (ValVT == MVT::f32) {
2993 Reg = State.AllocateReg(F32Regs);
2994 // Shadow int register
2995 State.AllocateReg(IntRegs);
2996 } else {
2997 Reg = State.AllocateReg(F64Regs);
2998 // Shadow int registers
2999 unsigned Reg2 = State.AllocateReg(IntRegs);
3000 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
3001 State.AllocateReg(IntRegs);
3002 State.AllocateReg(IntRegs);
3003 }
3004 } else
3005 llvm_unreachable("Cannot handle this ValVT.");
3006
3007 if (!Reg) {
3008 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
3009 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
3010 } else
3011 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
3012
3013 return false;
3014}
3015
3016static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
3017 MVT LocVT, CCValAssign::LocInfo LocInfo,
3018 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3019 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
3020
3021 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3022}
3023
3024static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
3025 MVT LocVT, CCValAssign::LocInfo LocInfo,
3026 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3027 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3028
3029 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3030}
3031
3032static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3033 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3034 CCState &State) LLVM_ATTRIBUTE_UNUSED;
3035
3036#include "MipsGenCallingConv.inc"
3037
3038 CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const {
3039 return CC_Mips_FixedArg;
3040 }
3041
3042 CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const {
3043 return RetCC_Mips;
3044 }
3045//===----------------------------------------------------------------------===//
3046// Call Calling Convention Implementation
3047//===----------------------------------------------------------------------===//
3048
3049SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3050 SDValue Chain, SDValue Arg,
3051 const SDLoc &DL, bool IsTailCall,
3052 SelectionDAG &DAG) const {
3053 if (!IsTailCall) {
3054 SDValue PtrOff =
3055 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3056 DAG.getIntPtrConstant(Offset, DL));
3057 return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3058 }
3059
3060 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3061 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3062 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3063 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
3064 MachineMemOperand::MOVolatile);
3065 }
3066
3067 void MipsTargetLowering::
3068 getOpndList(SmallVectorImpl<SDValue> &Ops,
3069 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3070 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3071 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3072 SDValue Chain) const {
3073 // Insert node "GP copy globalreg" before call to function.
3074 //
3075 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3076 // in PIC mode) allow symbols to be resolved via lazy binding.
3077 // The lazy binding stub requires GP to point to the GOT.
3078 // Note that we don't need GP to point to the GOT for indirect calls
3079 // (when R_MIPS_CALL* is not used for the call) because the Mips linker
3080 // generates a lazy binding stub for a function only when R_MIPS_CALL* are the
3081 // only relocs used for it (that is, the Mips linker doesn't generate a lazy
3082 // binding stub for a function whose address is taken in the program).
3083 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3084 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3085 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3086 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3087 }
3088
3089 // Build a sequence of copy-to-reg nodes chained together with token
3090 // chain and flag operands which copy the outgoing args into registers.
3091 // The InGlue is necessary since all emitted instructions must be
3092 // stuck together.
3093 SDValue InGlue;
3094
3095 for (auto &R : RegsToPass) {
3096 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
3097 InGlue = Chain.getValue(1);
3098 }
3099
3100 // Add argument registers to the end of the list so that they are
3101 // known live into the call.
3102 for (auto &R : RegsToPass)
3103 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3104
3105 // Add a register mask operand representing the call-preserved registers.
3106 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3107 const uint32_t *Mask =
3108 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3109 assert(Mask && "Missing call preserved mask for calling convention");
3110 if (Subtarget.inMips16HardFloat()) {
3111 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3112 StringRef Sym = G->getGlobal()->getName();
3113 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3114 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3115 Mask = MipsRegisterInfo::getMips16RetHelperMask();
3116 }
3117 }
3118 }
3119 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3120
3121 if (InGlue.getNode())
3122 Ops.push_back(InGlue);
3123}
3124
3125 void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3126 SDNode *Node) const {
3127 switch (MI.getOpcode()) {
3128 default:
3129 return;
3130 case Mips::JALR:
3131 case Mips::JALRPseudo:
3132 case Mips::JALR64:
3133 case Mips::JALR64Pseudo:
3134 case Mips::JALR16_MM:
3135 case Mips::JALRC16_MMR6:
3136 case Mips::TAILCALLREG:
3137 case Mips::TAILCALLREG64:
3138 case Mips::TAILCALLR6REG:
3139 case Mips::TAILCALL64R6REG:
3140 case Mips::TAILCALLREG_MM:
3141 case Mips::TAILCALLREG_MMR6: {
3142 if (!EmitJalrReloc ||
3143 Subtarget.inMips16Mode() ||
3144 !isPositionIndependent() ||
3145 Node->getNumOperands() < 1 ||
3146 Node->getOperand(0).getNumOperands() < 2) {
3147 return;
3148 }
3149 // We are after the callee address, set by LowerCall().
3150 // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
3151 // symbol.
3152 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3153 StringRef Sym;
3154 if (const GlobalAddressSDNode *G =
3155 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3156 // We must not emit the R_MIPS_JALR relocation against data symbols
3157 // since this will cause run-time crashes if the linker replaces the
3158 // call instruction with a relative branch to the data symbol.
3159 if (!isa<Function>(G->getGlobal())) {
3160 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3161 << G->getGlobal()->getName() << "\n");
3162 return;
3163 }
3164 Sym = G->getGlobal()->getName();
3165 }
3166 else if (const ExternalSymbolSDNode *ES =
3167 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3168 Sym = ES->getSymbol();
3169 }
3170
3171 if (Sym.empty())
3172 return;
3173
3174 MachineFunction *MF = MI.getParent()->getParent();
3175 MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
3176 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3177 MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
3178 }
3179 }
3180}
3181
3182/// LowerCall - functions arguments are copied from virtual regs to
3183/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
3184SDValue
3185MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3186 SmallVectorImpl<SDValue> &InVals) const {
3187 SelectionDAG &DAG = CLI.DAG;
3188 SDLoc DL = CLI.DL;
3189 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3190 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3191 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3192 SDValue Chain = CLI.Chain;
3193 SDValue Callee = CLI.Callee;
3194 bool &IsTailCall = CLI.IsTailCall;
3195 CallingConv::ID CallConv = CLI.CallConv;
3196 bool IsVarArg = CLI.IsVarArg;
3197
3198 MachineFunction &MF = DAG.getMachineFunction();
3199 MachineFrameInfo &MFI = MF.getFrameInfo();
3200 const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
3201 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3202 bool IsPIC = isPositionIndependent();
3203
3204 // Analyze operands of the call, assigning locations to each operand.
3205 SmallVector<CCValAssign, 16> ArgLocs;
3206 MipsCCState CCInfo(
3207 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3208 MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
3209
3210 const ExternalSymbolSDNode *ES =
3211 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3212
3213 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3214 // is during the lowering of a call with a byval argument which produces
3215 // a call to memcpy. For the O32 case, this causes the caller to allocate
3216 // stack space for the reserved argument area for the callee, then recursively
3217 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3218 // ABIs mandate that the callee allocates the reserved argument area. We do
3219 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3220 //
3221 // If the callee has a byval argument and memcpy is used, we are mandated
3222 // to already have produced a reserved argument area for the callee for O32.
3223 // Therefore, the reserved argument area can be reused for both calls.
3224 //
3225 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3226 // present, as we have yet to hook that node onto the chain.
3227 //
3228 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3229 // case. GCC does a similar trick, in that wherever possible, it calculates
3230 // the maximum outgoing argument area (including the reserved area), and
3231 // preallocates the stack space on entrance to the caller.
3232 //
3233 // FIXME: We should do the same for efficiency and space.
3234
3235 // Note: The check on the calling convention below must match
3236 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3237 bool MemcpyInByVal = ES && StringRef(ES->getSymbol()) == "memcpy" &&
3238 CallConv != CallingConv::Fast &&
3239 Chain.getOpcode() == ISD::CALLSEQ_START;
3240
3241 // Allocate the reserved argument area. It seems strange to do this from the
3242 // caller side but removing it breaks the frame size calculation.
3243 unsigned ReservedArgArea =
3244 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3245 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3246
3247 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3248 ES ? ES->getSymbol() : nullptr);
3249
3250 // Get a count of how many bytes are to be pushed on the stack.
3251 unsigned StackSize = CCInfo.getStackSize();
3252
3253 // Call site info for function parameters tracking.
3254 MachineFunction::CallSiteInfo CSInfo;
3255
3256 // Check if it's really possible to do a tail call. Restrict it to functions
3257 // that are part of this compilation unit.
3258 bool InternalLinkage = false;
3259 if (IsTailCall) {
3260 IsTailCall = isEligibleForTailCallOptimization(
3261 CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
3262 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3263 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3264 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3265 G->getGlobal()->hasPrivateLinkage() ||
3266 G->getGlobal()->hasHiddenVisibility() ||
3267 G->getGlobal()->hasProtectedVisibility());
3268 }
3269 }
3270 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3271 report_fatal_error("failed to perform tail call elimination on a call "
3272 "site marked musttail");
3273
3274 if (IsTailCall)
3275 ++NumTailCalls;
3276
3277 // Chain is the output chain of the last Load/Store or CopyToReg node.
3278 // ByValChain is the output chain of the last Memcpy node created for copying
3279 // byval arguments to the stack.
3280 unsigned StackAlignment = TFL->getStackAlignment();
3281 StackSize = alignTo(StackSize, StackAlignment);
3282
3283 if (!(IsTailCall || MemcpyInByVal))
3284 Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
3285
3286 SDValue StackPtr =
3287 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3288 getPointerTy(DAG.getDataLayout()));
3289
3290 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3291 SmallVector<SDValue, 8> MemOpChains;
3292
3293 CCInfo.rewindByValRegsInfo();
3294
3295 // Walk the register/memloc assignments, inserting copies/loads.
3296 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3297 SDValue Arg = OutVals[OutIdx];
3298 CCValAssign &VA = ArgLocs[i];
3299 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3300 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3301 bool UseUpperBits = false;
3302
3303 // ByVal Arg.
3304 if (Flags.isByVal()) {
3305 unsigned FirstByValReg, LastByValReg;
3306 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3307 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3308
3309 assert(Flags.getByValSize() &&
3310 "ByVal args of size 0 should have been ignored by front-end.");
3311 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3312 assert(!IsTailCall &&
3313 "Do not tail-call optimize if there is a byval argument.");
3314 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3315 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3316 VA);
3317 CCInfo.nextInRegsParam();
3318 continue;
3319 }
3320
3321 // Promote the value if needed.
3322 switch (VA.getLocInfo()) {
3323 default:
3324 llvm_unreachable("Unknown loc info!");
3325 case CCValAssign::Full:
3326 if (VA.isRegLoc()) {
3327 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3328 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3329 (ValVT == MVT::i64 && LocVT == MVT::f64))
3330 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3331 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3332 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3333 Arg, DAG.getConstant(0, DL, MVT::i32));
3334 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3335 Arg, DAG.getConstant(1, DL, MVT::i32));
3336 if (!Subtarget.isLittle())
3337 std::swap(Lo, Hi);
3338
3339 assert(VA.needsCustom());
3340
3341 Register LocRegLo = VA.getLocReg();
3342 Register LocRegHigh = ArgLocs[++i].getLocReg();
3343 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3344 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3345 continue;
3346 }
3347 }
3348 break;
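 // Example (illustrative): under O32, an f64 argument assigned to the
 // integer register set is split above into two i32 halves, e.g. the lo
 // half in $a0 and the hi half in $a1 (the pair is swapped on big-endian
 // targets).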
3349 case CCValAssign::BCvt:
3350 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3351 break;
3352 case CCValAssign::SExtUpper:
3353 UseUpperBits = true;
3354 [[fallthrough]];
3355 case CCValAssign::SExt:
3356 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3357 break;
3358 case CCValAssign::ZExtUpper:
3359 UseUpperBits = true;
3360 [[fallthrough]];
3361 case CCValAssign::ZExt:
3362 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3363 break;
3364 case CCValAssign::AExtUpper:
3365 UseUpperBits = true;
3366 [[fallthrough]];
3367 case CCValAssign::AExt:
3368 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3369 break;
3370 }
3371
3372 if (UseUpperBits) {
3373 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3374 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3375 Arg = DAG.getNode(
3376 ISD::SHL, DL, VA.getLocVT(), Arg,
3377 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3378 }
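 // Worked example (illustrative): for a 32-bit value assigned to a 64-bit
 // location with one of the *Upper loc-infos, ValSizeInBits = 32 and
 // LocSizeInBits = 64, so the SHL above places the value in bits 63..32
 // of the register, where this calling convention expects it.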
3379
3380 // Arguments that can be passed in a register must be kept in the
3381 // RegsToPass vector.
3382 if (VA.isRegLoc()) {
3383 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3384
3385 // If the parameter is passed through reg $D, which splits into
3386 // two physical registers, avoid creating call site info.
3387 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3388 continue;
3389
3390 // Collect CSInfo about which register passes which parameter.
3391 const TargetOptions &Options = DAG.getTarget().Options;
3392 if (Options.EmitCallSiteInfo)
3393 CSInfo.ArgRegPairs.emplace_back(VA.getLocReg(), i);
3394
3395 continue;
3396 }
3397
3398 // Register arguments were handled above; only memory locations reach here.
3399 assert(VA.isMemLoc());
3400
3401 // Emit an ISD::STORE which stores the
3402 // parameter value to a stack location.
3403 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3404 Chain, Arg, DL, IsTailCall, DAG));
3405 }
3406
3407 // Transform all store nodes into one single node because all store
3408 // nodes are independent of each other.
3409 if (!MemOpChains.empty())
3410 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
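 // A TokenFactor merges the independent store chains, e.g.
 //   ch = TokenFactor t4, t7, t9
 // so the call depends on every argument store without imposing an
 // ordering among the stores themselves.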
3411
3412 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3413 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3414 // node so that legalize doesn't hack it.
3415
3416 EVT Ty = Callee.getValueType();
3417 bool GlobalOrExternal = false, IsCallReloc = false;
3418
3419 // The long-calls feature is ignored in case of PIC.
3420 // While we do not support -mshared / -mno-shared properly,
3421 // ignore long-calls in case of -mabicalls too.
3422 if (!Subtarget.isABICalls() && !IsPIC) {
3423 // If the function should be called using "long call",
3424 // get its address into a register to avoid using
3425 // the `jal` instruction for the direct call.
3426 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3427 if (Subtarget.useLongCalls())
3428 Callee = Subtarget.hasSym32()
3429 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3430 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3431 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3432 bool UseLongCalls = Subtarget.useLongCalls();
3433 // If the function has a long-call/far/near attribute,
3434 // it overrides the command-line switch passed to the backend.
3435 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3436 if (F->hasFnAttribute("long-call"))
3437 UseLongCalls = true;
3438 else if (F->hasFnAttribute("short-call"))
3439 UseLongCalls = false;
3440 }
3441 if (UseLongCalls)
3442 Callee = Subtarget.hasSym32()
3443 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3444 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3445 }
3446 }
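 // Illustrative lowering (assuming long calls, non-PIC): instead of a
 // 26-bit-range `jal callee`, the address is materialized first, roughly:
 //   lui   $1, %hi(callee)
 //   addiu $25, $1, %lo(callee)
 //   jalr  $25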
3447
3448 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3449 if (IsPIC) {
3450 const GlobalValue *Val = G->getGlobal();
3451 InternalLinkage = Val->hasInternalLinkage();
3452
3453 if (InternalLinkage)
3454 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3455 else if (Subtarget.useXGOT()) {
3456 Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3457 MipsII::MO_CALL_LO16, Chain,
3458 FuncInfo->callPtrInfo(MF, Val));
3459 IsCallReloc = true;
3460 } else {
3461 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3462 FuncInfo->callPtrInfo(MF, Val));
3463 IsCallReloc = true;
3464 }
3465 } else
3466 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3467 getPointerTy(DAG.getDataLayout()), 0,
3468 MipsII::MO_NO_FLAG);
3469 GlobalOrExternal = true;
3470 }
3471 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3472 const char *Sym = S->getSymbol();
3473
3474 if (!IsPIC) // static
3475 Callee = DAG.getTargetExternalSymbol(
3476 Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
3477 else if (Subtarget.useXGOT()) {
3478 Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3479 MipsII::MO_CALL_LO16, Chain,
3480 FuncInfo->callPtrInfo(MF, Sym));
3481 IsCallReloc = true;
3482 } else { // PIC
3483 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3484 FuncInfo->callPtrInfo(MF, Sym));
3485 IsCallReloc = true;
3486 }
3487
3488 GlobalOrExternal = true;
3489 }
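 // Illustrative PIC lowering: the callee address is loaded from the GOT
 // with a call relocation before an indirect jump, roughly:
 //   lw   $25, %call16(callee)($gp)
 //   jalr $25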
3490
3491 SmallVector<SDValue, 8> Ops(1, Chain);
3492 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3493
3494 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3495 IsCallReloc, CLI, Callee, Chain);
3496
3497 if (IsTailCall) {
3498 MF.getFrameInfo().setHasTailCall();
3499 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3500 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3501 return Ret;
3502 }
3503
3504 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3505 SDValue InGlue = Chain.getValue(1);
3506
3507 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3508
3509 // Create the CALLSEQ_END node when this is not a call to
3510 // memcpy.
3511 if (!MemcpyInByVal) {
3512 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
3513 InGlue = Chain.getValue(1);
3514 }
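 // Illustrative effect: the CALLSEQ_START/CALLSEQ_END pair brackets the
 // outgoing-argument area and later becomes the stack adjustment around
 // the call (often folded into the prologue/epilogue), roughly:
 //   addiu $sp, $sp, -16   ; CALLSEQ_START (StackSize = 16)
 //   jal   callee
 //   addiu $sp, $sp, 16    ; CALLSEQ_END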
3515
3516 // Handle result values, copying them out of physregs into vregs that we
3517 // return.
3518 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
3519 InVals, CLI);
3520}
3521
3522/// LowerCallResult - Lower the result values of a call into the
3523/// appropriate copies out of appropriate physical registers.
3524SDValue MipsTargetLowering::LowerCallResult(
3525 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
3526 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3527 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3528 TargetLowering::CallLoweringInfo &CLI) const {
3529 // Assign locations to each value returned by this call.
3530 SmallVector<CCValAssign, 16> RVLocs;
3531 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3532 *DAG.getContext());
3533
3534 const ExternalSymbolSDNode *ES =
3535 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3536 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3537 ES ? ES->getSymbol() : nullptr);
3538
3539 // Copy all of the result registers out of their specified physreg.
3540 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3541 CCValAssign &VA = RVLocs[i];
3542 assert(VA.isRegLoc() && "Can only return in registers!");
3543
3544 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3545 RVLocs[i].getLocVT(), InGlue);
3546 Chain = Val.getValue(1);
3547 InGlue = Val.getValue(2);
3548
3549 if (VA.isUpperBitsInLoc()) {
3550 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3551 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3552 unsigned Shift =
3553 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3554 Val = DAG.getNode(
3555 Shift, DL, VA.getLocVT(), Val,
3556 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3557 }
3558
3559 switch (VA.getLocInfo()) {
3560 default:
3561 llvm_unreachable("Unknown loc info!");
3562 case CCValAssign::Full:
3563 break;
3564 case CCValAssign::BCvt:
3565 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3566 break;
3567 case CCValAssign::AExt:
3568 case CCValAssign::AExtUpper:
3569 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3570 break;
3571 case CCValAssign::ZExt:
3572 case CCValAssign::ZExtUpper:
3573 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3574 DAG.getValueType(VA.getValVT()));
3575 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3576 break;
3577 case CCValAssign::SExt:
3578 case CCValAssign::SExtUpper:
3579 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3580 DAG.getValueType(VA.getValVT()));
3581 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3582 break;
3583 }
3584
3585 InVals.push_back(Val);
3586 }
3587
3588 return Chain;
3589}
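 // For example (illustrative): a call returning two i32 values under O32
 // produces two glued CopyFromReg nodes reading $v0 and $v1, so the
 // copies stay adjacent to the call node during scheduling.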
3590
3591 static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3592 EVT ArgVT, const SDLoc &DL,
3593 SelectionDAG &DAG) {
3594 MVT LocVT = VA.getLocVT();
3595 EVT ValVT = VA.getValVT();
3596
3597 // Shift into the upper bits if necessary.
3598 switch (VA.getLocInfo()) {
3599 default:
3600 break;
3601 case CCValAssign::AExtUpper:
3602 case CCValAssign::SExtUpper:
3603 case CCValAssign::ZExtUpper: {
3604 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3605 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3606 unsigned Opcode =
3607 (VA.getLocInfo() == CCValAssign::ZExtUpper) ? ISD::SRL : ISD::SRA;
3608 Val = DAG.getNode(
3609 Opcode, DL, VA.getLocVT(), Val,
3610 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3611 break;
3612 }
3613 }
3614
3615 // If this is a value smaller than the argument slot size (32-bit for O32,
3616 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3617 // size. Extract the value and insert any appropriate assertions regarding
3618 // sign/zero extension.
3619 switch (VA.getLocInfo()) {
3620 default:
3621 llvm_unreachable("Unknown loc info!");
3622 case CCValAssign::Full:
3623 break;
3624 case CCValAssign::AExtUpper:
3625 case CCValAssign::AExt:
3626 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3627 break;
3628 case CCValAssign::SExtUpper:
3629 case CCValAssign::SExt:
3630 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3631 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3632 break;
3633 case CCValAssign::ZExtUpper:
3634 case CCValAssign::ZExt:
3635 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3636 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3637 break;
3638 case CCValAssign::BCvt:
3639 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3640 break;
3641 }
3642
3643 return Val;
3644}
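 // Worked example (illustrative): an i16 argument passed sign-extended in
 // a 32-bit O32 slot arrives here as i32; the SExt path above emits
 //   v = AssertSext v, ValueType:i16
 //   v = truncate v to i16
 // giving later DAG combines the known-sign-bits guarantee.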
3645
3646//===----------------------------------------------------------------------===//
3647// Formal Arguments Calling Convention Implementation
3648//===----------------------------------------------------------------------===//
3649/// LowerFormalArguments - transform physical registers into virtual registers
3650 /// and generate load operations for arguments placed on the stack.
3651SDValue MipsTargetLowering::LowerFormalArguments(
3652 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3653 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3654 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3655 MachineFunction &MF = DAG.getMachineFunction();
3656 MachineFrameInfo &MFI = MF.getFrameInfo();
3657 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3658
3659 MipsFI->setVarArgsFrameIndex(0);
3660
3661 // Used with varargs to accumulate store chains.
3662 std::vector<SDValue> OutChains;
3663
3664 // Assign locations to all of the incoming arguments.
3665 SmallVector<CCValAssign, 16> ArgLocs;
3666 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3667 *DAG.getContext());
3668 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3669 const Function &Func = DAG.getMachineFunction().getFunction();
3670 Function::const_arg_iterator FuncArg = Func.arg_begin();
3671
3672 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3673 report_fatal_error(
3674 "Functions with the interrupt attribute cannot have arguments!");
3675
3676 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3677 MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
3678 CCInfo.getInRegsParamsCount() > 0);
3679
3680 unsigned CurArgIdx = 0;
3681 CCInfo.rewindByValRegsInfo();
3682
3683 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3684 CCValAssign &VA = ArgLocs[i];
3685 if (Ins[InsIdx].isOrigArg()) {
3686 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3687 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3688 }
3689 EVT ValVT = VA.getValVT();
3690 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3691 bool IsRegLoc = VA.isRegLoc();
3692
3693 if (Flags.isByVal()) {
3694 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3695 unsigned FirstByValReg, LastByValReg;
3696 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3697 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3698
3699 assert(Flags.getByValSize() &&
3700 "ByVal args of size 0 should have been ignored by front-end.");
3701 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3702 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3703 FirstByValReg, LastByValReg, VA, CCInfo);
3704 CCInfo.nextInRegsParam();
3705 continue;
3706 }
3707
3708 // Arguments passed in registers
3709 if (IsRegLoc) {
3710 MVT RegVT = VA.getLocVT();
3711 Register ArgReg = VA.getLocReg();
3712 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3713
3714 // Transform the arguments stored on
3715 // physical registers into virtual ones
3716 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3717 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3718
3719 ArgValue =
3720 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3721
3722 // Handle floating point arguments passed in integer registers and
3723 // long double arguments passed in floating point registers.
3724 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3725 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3726 (RegVT == MVT::f64 && ValVT == MVT::i64))
3727 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3728 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3729 ValVT == MVT::f64) {
3730 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3731 CCValAssign &NextVA = ArgLocs[++i];
3732 unsigned Reg2 =
3733 addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
3734 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3735 if (!Subtarget.isLittle())
3736 std::swap(ArgValue, ArgValue2);
3737 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3738 ArgValue, ArgValue2);
3739 }
3740
3741 InVals.push_back(ArgValue);
3742 } else { // !VA.isRegLoc()
3743 MVT LocVT = VA.getLocVT();
3744
3745 assert(!VA.needsCustom() && "unexpected custom memory argument");
3746
3747 // Only arguments passed on the stack should make it here.
3748 assert(VA.isMemLoc());
3749
3750 // The stack pointer offset is relative to the caller stack frame.
3751 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3752 VA.getLocMemOffset(), true);
3753
3754 // Create load nodes to retrieve arguments from the stack
3755 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3756 SDValue ArgValue = DAG.getLoad(
3757 LocVT, DL, Chain, FIN,
3758 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3759 OutChains.push_back(ArgValue.getValue(1));
3760
3761 ArgValue =
3762 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3763
3764 InVals.push_back(ArgValue);
3765 }
3766 }
3767
3768 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3769
3770 if (ArgLocs[i].needsCustom()) {
3771 ++i;
3772 continue;
3773 }
3774
3775 // The MIPS ABIs for returning structs by value require that we copy
3776 // the sret argument into $v0 for the return. Save the argument into
3777 // a virtual register so that we can access it from the return points.
3778 if (Ins[InsIdx].Flags.isSRet()) {
3779 unsigned Reg = MipsFI->getSRetReturnReg();
3780 if (!Reg) {
3781 Reg = MF.getRegInfo().createVirtualRegister(
3782 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3783 MipsFI->setSRetReturnReg(Reg);
3784 }
3785 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3786 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3787 break;
3788 }
3789 }
3790
3791 if (IsVarArg)
3792 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3793
3794 // All stores are grouped in one node to allow the matching between
3795 // the size of Ins and InVals. This only happens for vararg functions.
3796 if (!OutChains.empty()) {
3797 OutChains.push_back(Chain);
3798 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3799 }
3800
3801 return Chain;
3802}
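 // Example (illustrative): for
 //   define void @f(ptr sret(%struct.S) %out, i32 %x) { ... }
 // the incoming sret pointer (in $a0 under O32) is saved to a virtual
 // register above so that LowerReturn can copy it back into $v0.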
3803
3804//===----------------------------------------------------------------------===//
3805// Return Value Calling Convention Implementation
3806//===----------------------------------------------------------------------===//
3807
3808bool
3809MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3810 MachineFunction &MF, bool IsVarArg,
3811 const SmallVectorImpl<ISD::OutputArg> &Outs,
3812 LLVMContext &Context) const {
3813 SmallVector<CCValAssign, 16> RVLocs;
3814 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3815 return CCInfo.CheckReturn(Outs, RetCC_Mips);
3816}
3817
3818bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3819 bool IsSigned) const {
3820 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3821 return true;
3822
3823 return IsSigned;
3824}
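 // Example: on N32/N64 an i32 libcall argument (say, to a soft-float
 // helper) is passed sign-extended to 64 bits even if the C type is
 // unsigned, because those ABIs keep 32-bit values sign-extended in
 // 64-bit registers; hence the unconditional `true` above for i32.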
3825
3826SDValue
3827MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3828 const SDLoc &DL,
3829 SelectionDAG &DAG) const {
3830 MachineFunction &MF = DAG.getMachineFunction();
3831 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3832
3833 MipsFI->setISR();
3834
3835 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3836}
3837
3838SDValue
3839MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3840 bool IsVarArg,
3841 const SmallVectorImpl<ISD::OutputArg> &Outs,
3842 const SmallVectorImpl<SDValue> &OutVals,
3843 const SDLoc &DL, SelectionDAG &DAG) const {
3844 // CCValAssign - represent the assignment of
3845 // the return value to a location
3846 SmallVector<CCValAssign, 16> RVLocs;
3847 MachineFunction &MF = DAG.getMachineFunction();
3848
3849 // CCState - Info about the registers and stack slot.
3850 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3851
3852 // Analyze return values.
3853 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3854
3855 SDValue Glue;
3856 SmallVector<SDValue, 4> RetOps(1, Chain);
3857
3858 // Copy the result values into the output registers.
3859 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3860 SDValue Val = OutVals[i];
3861 CCValAssign &VA = RVLocs[i];
3862 assert(VA.isRegLoc() && "Can only return in registers!");
3863 bool UseUpperBits = false;
3864
3865 switch (VA.getLocInfo()) {
3866 default:
3867 llvm_unreachable("Unknown loc info!");
3868 case CCValAssign::Full:
3869 break;
3870 case CCValAssign::BCvt:
3871 Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
3872 break;
3873 case CCValAssign::AExtUpper:
3874 UseUpperBits = true;
3875 [[fallthrough]];
3876 case CCValAssign::AExt:
3877 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
3878 break;
3879 case CCValAssign::ZExtUpper:
3880 UseUpperBits = true;
3881 [[fallthrough]];
3882 case CCValAssign::ZExt:
3883 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
3884 break;
3885 case CCValAssign::SExtUpper:
3886 UseUpperBits = true;
3887 [[fallthrough]];
3888 case CCValAssign::SExt:
3889 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
3890 break;
3891 }
3892
3893 if (UseUpperBits) {
3894 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3895 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3896 Val = DAG.getNode(
3897 ISD::SHL, DL, VA.getLocVT(), Val,
3898 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3899 }
3900
3901 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
3902
3903 // Guarantee that all emitted copies are stuck together with flags.
3904 Glue = Chain.getValue(1);
3905 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3906 }
3907
3908 // The MIPS ABIs for returning structs by value require that we copy
3909 // the sret argument into $v0 for the return. We saved the argument into
3910 // a virtual register in the entry block, so now we copy the value out
3911 // and into $v0.
3912 if (MF.getFunction().hasStructRetAttr()) {
3913 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3914 unsigned Reg = MipsFI->getSRetReturnReg();
3915
3916 if (!Reg)
3917 llvm_unreachable("sret virtual register not created in the entry block");
3918 SDValue Val =
3919 DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
3920 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3921
3922 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
3923 Glue = Chain.getValue(1);
3924 RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
3925 }
3926
3927 RetOps[0] = Chain; // Update chain.
3928
3929 // Add the glue if we have it.
3930 if (Glue.getNode())
3931 RetOps.push_back(Glue);
3932
3933 // ISRs must use "eret".
3934 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
3935 return LowerInterruptReturn(RetOps, DL, DAG);
3936
3937 // Standard return on Mips is a "jr $ra"
3938 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
3939}
3940
3941//===----------------------------------------------------------------------===//
3942// Mips Inline Assembly Support
3943//===----------------------------------------------------------------------===//
3944
3945/// getConstraintType - Given a constraint letter, return the type of
3946/// constraint it is for this target.
3947 MipsTargetLowering::ConstraintType
3948MipsTargetLowering::getConstraintType(StringRef Constraint) const {
3949 // Mips specific constraints
3950 // GCC config/mips/constraints.md
3951 //
3952 // 'd' : An address register. Equivalent to r
3953 // unless generating MIPS16 code.
3954 // 'y' : Equivalent to r; retained for
3955 // backwards compatibility.
3956 // 'c' : A register suitable for use in an indirect
3957 // jump. This will always be $25 for -mabicalls.
3958 // 'l' : The lo register. 1 word storage.
3959 // 'x' : The hilo register pair. Double word storage.
3960 if (Constraint.size() == 1) {
3961 switch (Constraint[0]) {
3962 default : break;
3963 case 'd':
3964 case 'y':
3965 case 'f':
3966 case 'c':
3967 case 'l':
3968 case 'x':
3969 return C_RegisterClass;
3970 case 'R':
3971 return C_Memory;
3972 }
3973 }
3974
3975 if (Constraint == "ZC")
3976 return C_Memory;
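 // Example usage (illustrative):
 //   int lo;
 //   asm("mult %1, %2" : "=l"(lo) : "r"(a), "r"(b)); // 'l' -> lo register
 //   asm("jalr %0" :: "c"(fnptr));                   // 'c' -> $25 w/ abicalls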
3977
3978 return TargetLowering::getConstraintType(Constraint);