1//===- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the MIPS-specific support for the FastISel class.
11/// Some of the target-specific code is generated by tablegen in the file
12/// MipsGenFastISel.inc, which is #included here.
13///
14//===----------------------------------------------------------------------===//
15
18#include "MipsCCState.h"
19#include "MipsISelLowering.h"
20#include "MipsInstrInfo.h"
21#include "MipsMachineFunction.h"
22#include "MipsSubtarget.h"
23#include "MipsTargetMachine.h"
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/DenseMap.h"
41#include "llvm/IR/Attributes.h"
42#include "llvm/IR/CallingConv.h"
43#include "llvm/IR/Constant.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/Function.h"
48#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Operator.h"
55#include "llvm/IR/Type.h"
56#include "llvm/IR/User.h"
57#include "llvm/IR/Value.h"
58#include "llvm/MC/MCContext.h"
59#include "llvm/MC/MCInstrDesc.h"
60#include "llvm/MC/MCSymbol.h"
63#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <array>
69#include <cassert>
70#include <cstdint>
71
72#define DEBUG_TYPE "mips-fastisel"
73
74using namespace llvm;
75
76extern cl::opt<bool> EmitJalrReloc;
77
78namespace {
79
80class MipsFastISel final : public FastISel {
81
82 // All possible address modes.
83 class Address {
84 public:
85 using BaseKind = enum { RegBase, FrameIndexBase };
86
87 private:
88 BaseKind Kind = RegBase;
89 union {
90 unsigned Reg;
91 int FI;
92 } Base;
93
94 int64_t Offset = 0;
95
96 const GlobalValue *GV = nullptr;
97
98 public:
99 // Innocuous defaults for our address.
100 Address() { Base.Reg = 0; }
101
102 void setKind(BaseKind K) { Kind = K; }
103 BaseKind getKind() const { return Kind; }
104 bool isRegBase() const { return Kind == RegBase; }
105 bool isFIBase() const { return Kind == FrameIndexBase; }
106
107 void setReg(unsigned Reg) {
108 assert(isRegBase() && "Invalid base register access!");
109 Base.Reg = Reg;
110 }
111
112 unsigned getReg() const {
113 assert(isRegBase() && "Invalid base register access!");
114 return Base.Reg;
115 }
116
117 void setFI(unsigned FI) {
118 assert(isFIBase() && "Invalid base frame index access!");
119 Base.FI = FI;
120 }
121
122 unsigned getFI() const {
123 assert(isFIBase() && "Invalid base frame index access!");
124 return Base.FI;
125 }
126
127 void setOffset(int64_t Offset_) { Offset = Offset_; }
128 int64_t getOffset() const { return Offset; }
129 void setGlobalValue(const GlobalValue *G) { GV = G; }
130 const GlobalValue *getGlobalValue() { return GV; }
131 };
132
133 /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
134 /// make the right decision when generating code for different targets.
135 const TargetMachine &TM;
136 const MipsSubtarget *Subtarget;
137 const TargetInstrInfo &TII;
138 const TargetLowering &TLI;
139 MipsFunctionInfo *MFI;
140
141 // Convenience variables to avoid some queries.
142 LLVMContext *Context;
143
144 bool fastLowerArguments() override;
145 bool fastLowerCall(CallLoweringInfo &CLI) override;
146 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
147
148 bool UnsupportedFPMode; // Allow fast-isel to proceed and simply not handle
149                         // floating point, rather than rejecting fast-isel
150                         // for the whole function.
151
152private:
153 // Selection routines.
154 bool selectLogicalOp(const Instruction *I);
155 bool selectLoad(const Instruction *I);
156 bool selectStore(const Instruction *I);
157 bool selectBranch(const Instruction *I);
158 bool selectSelect(const Instruction *I);
159 bool selectCmp(const Instruction *I);
160 bool selectFPExt(const Instruction *I);
161 bool selectFPTrunc(const Instruction *I);
162 bool selectFPToInt(const Instruction *I, bool IsSigned);
163 bool selectRet(const Instruction *I);
164 bool selectTrunc(const Instruction *I);
165 bool selectIntExt(const Instruction *I);
166 bool selectShift(const Instruction *I);
167 bool selectDivRem(const Instruction *I, unsigned ISDOpcode);
168
169 // Utility helper routines.
170 bool isTypeLegal(Type *Ty, MVT &VT);
171 bool isTypeSupported(Type *Ty, MVT &VT);
172 bool isLoadTypeLegal(Type *Ty, MVT &VT);
173 bool computeAddress(const Value *Obj, Address &Addr);
174 bool computeCallAddress(const Value *V, Address &Addr);
175 void simplifyAddress(Address &Addr);
176
177 // Emit helper routines.
178 bool emitCmp(unsigned DestReg, const CmpInst *CI);
179 bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr);
180 bool emitStore(MVT VT, unsigned SrcReg, Address &Addr);
181 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
182 bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
183                 bool IsZExt);
184
185 bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
186
187 bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
188 bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
189 unsigned DestReg);
190 bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
191 unsigned DestReg);
192
193 unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
194
195 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
196 const Value *RHS);
197
198 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
199 unsigned materializeGV(const GlobalValue *GV, MVT VT);
200 unsigned materializeInt(const Constant *C, MVT VT);
201 unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
202 unsigned materializeExternalCallSym(MCSymbol *Sym);
203
204 MachineInstrBuilder emitInst(unsigned Opc) {
205 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
206 }
207
208 MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
209 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
210 DstReg);
211 }
212
213 MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
214 unsigned MemReg, int64_t MemOffset) {
215 return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
216 }
217
218 MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
219 unsigned MemReg, int64_t MemOffset) {
220 return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
221 }
222
223 unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
224 const TargetRegisterClass *RC,
225 unsigned Op0, unsigned Op1);
226
227 // for some reason, this default is not generated by tablegen
228 // so we explicitly generate it here.
229 unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
230 unsigned Op0, uint64_t imm1, uint64_t imm2,
231 unsigned Op3) {
232 return 0;
233 }
234
235 // Call handling routines.
236private:
237 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
238 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
239 unsigned &NumBytes);
240 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
241
242 const MipsABIInfo &getABI() const {
243 return static_cast<const MipsTargetMachine &>(TM).getABI();
244 }
245
246public:
247 // Backend specific FastISel code.
248 explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
249 const TargetLibraryInfo *libInfo)
250 : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
251 Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
252 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
253 MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
254 Context = &funcInfo.Fn->getContext();
255 UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
256 }
257
258 unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
259 unsigned fastMaterializeConstant(const Constant *C) override;
260 bool fastSelectInstruction(const Instruction *I) override;
261
262#include "MipsGenFastISel.inc"
263};
264
265} // end anonymous namespace
266
267static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
268 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
269                     CCState &State) LLVM_ATTRIBUTE_UNUSED;
270
271static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
272 CCValAssign::LocInfo LocInfo,
273 ISD::ArgFlagsTy ArgFlags, CCState &State) {
274 llvm_unreachable("should not be called");
275}
276
277static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
278 CCValAssign::LocInfo LocInfo,
279 ISD::ArgFlagsTy ArgFlags, CCState &State) {
280 llvm_unreachable("should not be called");
281}
282
283#include "MipsGenCallingConv.inc"
284
285CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
286 return CC_MipsO32;
287}
288
289unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
290 const Value *LHS, const Value *RHS) {
291 // Canonicalize immediates to the RHS first.
292 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
293 std::swap(LHS, RHS);
294
295 unsigned Opc;
296 switch (ISDOpc) {
297 case ISD::AND:
298 Opc = Mips::AND;
299 break;
300 case ISD::OR:
301 Opc = Mips::OR;
302 break;
303 case ISD::XOR:
304 Opc = Mips::XOR;
305 break;
306 default:
307 llvm_unreachable("unexpected opcode");
308 }
309
310 Register LHSReg = getRegForValue(LHS);
311 if (!LHSReg)
312 return 0;
313
314 unsigned RHSReg;
315 if (const auto *C = dyn_cast<ConstantInt>(RHS))
316 RHSReg = materializeInt(C, MVT::i32);
317 else
318 RHSReg = getRegForValue(RHS);
319 if (!RHSReg)
320 return 0;
321
322 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
323 if (!ResultReg)
324 return 0;
325
326 emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
327 return ResultReg;
328}
329
330unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
331 assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
332 "Alloca should always return a pointer.");
333
334  DenseMap<const AllocaInst *, int>::iterator SI =
335      FuncInfo.StaticAllocaMap.find(AI);
336
337 if (SI != FuncInfo.StaticAllocaMap.end()) {
338 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
339 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu),
340 ResultReg)
341 .addFrameIndex(SI->second)
342 .addImm(0);
343 return ResultReg;
344 }
345
346 return 0;
347}
348
349unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
350 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
351 return 0;
352 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
353 const ConstantInt *CI = cast<ConstantInt>(C);
354 return materialize32BitInt(CI->getZExtValue(), RC);
355}
356
357unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
358 const TargetRegisterClass *RC) {
359 Register ResultReg = createResultReg(RC);
360
361 if (isInt<16>(Imm)) {
362 unsigned Opc = Mips::ADDiu;
363 emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
364 return ResultReg;
365 } else if (isUInt<16>(Imm)) {
366 emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
367 return ResultReg;
368 }
369 unsigned Lo = Imm & 0xFFFF;
370 unsigned Hi = (Imm >> 16) & 0xFFFF;
371 if (Lo) {
372 // Both Lo and Hi have nonzero bits.
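    // For example, Imm = 0x00012345 is emitted as "lui $tmp, 0x1" followed by
    // "ori $dst, $tmp, 0x2345".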
373 Register TmpReg = createResultReg(RC);
374 emitInst(Mips::LUi, TmpReg).addImm(Hi);
375 emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
376 } else {
377 emitInst(Mips::LUi, ResultReg).addImm(Hi);
378 }
379 return ResultReg;
380}
381
382unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
383 if (UnsupportedFPMode)
384 return 0;
385 int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
386 if (VT == MVT::f32) {
387 const TargetRegisterClass *RC = &Mips::FGR32RegClass;
388 Register DestReg = createResultReg(RC);
389 unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
390 emitInst(Mips::MTC1, DestReg).addReg(TempReg);
391 return DestReg;
392 } else if (VT == MVT::f64) {
393 const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
394 Register DestReg = createResultReg(RC);
395 unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
396 unsigned TempReg2 =
397 materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
398 emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
399 return DestReg;
400 }
401 return 0;
402}
403
404unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
405 // For now 32-bit only.
406 if (VT != MVT::i32)
407 return 0;
408 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
409 Register DestReg = createResultReg(RC);
410 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
411 bool IsThreadLocal = GVar && GVar->isThreadLocal();
412 // TLS not supported at this time.
413 if (IsThreadLocal)
414 return 0;
415 emitInst(Mips::LW, DestReg)
416 .addReg(MFI->getGlobalBaseReg(*MF))
417 .addGlobalAddress(GV, 0, MipsII::MO_GOT);
418 if ((GV->hasInternalLinkage() ||
419 (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
420 Register TempReg = createResultReg(RC);
421 emitInst(Mips::ADDiu, TempReg)
422 .addReg(DestReg)
423 .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
424 DestReg = TempReg;
425 }
426 return DestReg;
427}
428
429unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
430 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
431 Register DestReg = createResultReg(RC);
432 emitInst(Mips::LW, DestReg)
433 .addReg(MFI->getGlobalBaseReg(*MF))
434 .addSym(Sym, MipsII::MO_GOT);
435 return DestReg;
436}
437
438// Materialize a constant into a register, and return the register
439// number (or zero if we failed to handle it).
440unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
441 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
442
443 // Only handle simple types.
444 if (!CEVT.isSimple())
445 return 0;
446 MVT VT = CEVT.getSimpleVT();
447
448 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
449 return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
450 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
451 return materializeGV(GV, VT);
452 else if (isa<ConstantInt>(C))
453 return materializeInt(C, VT);
454
455 return 0;
456}
457
458bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
459 const User *U = nullptr;
460 unsigned Opcode = Instruction::UserOp1;
461 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
462 // Don't walk into other basic blocks unless the object is an alloca from
463 // another block, otherwise it may not have a virtual register assigned.
464 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
465 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
466 Opcode = I->getOpcode();
467 U = I;
468 }
469 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
470 Opcode = C->getOpcode();
471 U = C;
472 }
473 switch (Opcode) {
474 default:
475 break;
476 case Instruction::BitCast:
477 // Look through bitcasts.
478 return computeAddress(U->getOperand(0), Addr);
479 case Instruction::GetElementPtr: {
480 Address SavedAddr = Addr;
481 int64_t TmpOffset = Addr.getOffset();
482 // Iterate through the GEP folding the constants into offsets where
483 // we can.
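    // For example, a constant index i into an array of i32 adds i * 4 to the
    // running offset, and a constant struct index adds that field's offset
    // from the StructLayout.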
484    gep_type_iterator GTI = gep_type_begin(U);
485    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
486 ++i, ++GTI) {
487 const Value *Op = *i;
488 if (StructType *STy = GTI.getStructTypeOrNull()) {
489 const StructLayout *SL = DL.getStructLayout(STy);
490 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
491 TmpOffset += SL->getElementOffset(Idx);
492      } else {
493        uint64_t S = GTI.getSequentialElementStride(DL);
494        while (true) {
495 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
496 // Constant-offset addressing.
497 TmpOffset += CI->getSExtValue() * S;
498 break;
499 }
500 if (canFoldAddIntoGEP(U, Op)) {
501 // A compatible add with a constant operand. Fold the constant.
502 ConstantInt *CI =
503 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
504 TmpOffset += CI->getSExtValue() * S;
505 // Iterate on the other operand.
506 Op = cast<AddOperator>(Op)->getOperand(0);
507 continue;
508 }
509 // Unsupported
510 goto unsupported_gep;
511 }
512 }
513 }
514 // Try to grab the base operand now.
515 Addr.setOffset(TmpOffset);
516 if (computeAddress(U->getOperand(0), Addr))
517 return true;
518 // We failed, restore everything and try the other options.
519 Addr = SavedAddr;
520 unsupported_gep:
521 break;
522 }
523 case Instruction::Alloca: {
524 const AllocaInst *AI = cast<AllocaInst>(Obj);
525    DenseMap<const AllocaInst *, int>::iterator SI =
526        FuncInfo.StaticAllocaMap.find(AI);
527 if (SI != FuncInfo.StaticAllocaMap.end()) {
528 Addr.setKind(Address::FrameIndexBase);
529 Addr.setFI(SI->second);
530 return true;
531 }
532 break;
533 }
534 }
535 Addr.setReg(getRegForValue(Obj));
536 return Addr.getReg() != 0;
537}
538
539bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
540 const User *U = nullptr;
541 unsigned Opcode = Instruction::UserOp1;
542
543 if (const auto *I = dyn_cast<Instruction>(V)) {
544 // Check if the value is defined in the same basic block. This information
545 // is crucial to know whether or not folding an operand is valid.
546 if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
547 Opcode = I->getOpcode();
548 U = I;
549 }
550 } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
551 Opcode = C->getOpcode();
552 U = C;
553 }
554
555 switch (Opcode) {
556 default:
557 break;
558 case Instruction::BitCast:
559 // Look past bitcasts if its operand is in the same BB.
560 return computeCallAddress(U->getOperand(0), Addr);
561 break;
562 case Instruction::IntToPtr:
563 // Look past no-op inttoptrs if its operand is in the same BB.
564 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
565 TLI.getPointerTy(DL))
566 return computeCallAddress(U->getOperand(0), Addr);
567 break;
568 case Instruction::PtrToInt:
569 // Look past no-op ptrtoints if its operand is in the same BB.
570 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
571 return computeCallAddress(U->getOperand(0), Addr);
572 break;
573 }
574
575 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
576 Addr.setGlobalValue(GV);
577 return true;
578 }
579
580 // If all else fails, try to materialize the value in a register.
581 if (!Addr.getGlobalValue()) {
582 Addr.setReg(getRegForValue(V));
583 return Addr.getReg() != 0;
584 }
585
586 return false;
587}
588
589bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
590 EVT evt = TLI.getValueType(DL, Ty, true);
591 // Only handle simple types.
592 if (evt == MVT::Other || !evt.isSimple())
593 return false;
594 VT = evt.getSimpleVT();
595
596 // Handle all legal types, i.e. a register that will directly hold this
597 // value.
598 return TLI.isTypeLegal(VT);
599}
600
601bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
602 if (Ty->isVectorTy())
603 return false;
604
605 if (isTypeLegal(Ty, VT))
606 return true;
607
608  // If this is a type that can be sign- or zero-extended to a basic operation,
609  // go ahead and accept it now.
610 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
611 return true;
612
613 return false;
614}
615
616bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
617 if (isTypeLegal(Ty, VT))
618 return true;
619 // We will extend this in a later patch:
620  // If this is a type that can be sign- or zero-extended to a basic operation,
621  // go ahead and accept it now.
622 if (VT == MVT::i8 || VT == MVT::i16)
623 return true;
624 return false;
625}
626
627// Because of how EmitCmp is called with fast-isel, you can
628// end up with redundant "andi" instructions after the sequences emitted below.
629// We should try and solve this issue in the future.
630//
631bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
632 const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
633 bool IsUnsigned = CI->isUnsigned();
634 unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
635 if (LeftReg == 0)
636 return false;
637 unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
638 if (RightReg == 0)
639 return false;
640  CmpInst::Predicate P = CI->getPredicate();
641
642 switch (P) {
643 default:
644 return false;
645 case CmpInst::ICMP_EQ: {
646 Register TempReg = createResultReg(&Mips::GPR32RegClass);
647 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
648 emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
649 break;
650 }
651 case CmpInst::ICMP_NE: {
652 Register TempReg = createResultReg(&Mips::GPR32RegClass);
653 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
654 emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
655 break;
656 }
657  case CmpInst::ICMP_UGT:
658    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
659 break;
660  case CmpInst::ICMP_ULT:
661    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
662 break;
663 case CmpInst::ICMP_UGE: {
664 Register TempReg = createResultReg(&Mips::GPR32RegClass);
665 emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
666 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
667 break;
668 }
669 case CmpInst::ICMP_ULE: {
670 Register TempReg = createResultReg(&Mips::GPR32RegClass);
671 emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
672 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
673 break;
674 }
675  case CmpInst::ICMP_SGT:
676    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
677 break;
678  case CmpInst::ICMP_SLT:
679    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
680 break;
681 case CmpInst::ICMP_SGE: {
682 Register TempReg = createResultReg(&Mips::GPR32RegClass);
683 emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
684 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
685 break;
686 }
687 case CmpInst::ICMP_SLE: {
688 Register TempReg = createResultReg(&Mips::GPR32RegClass);
689 emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
690 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
691 break;
692 }
693  case CmpInst::FCMP_OEQ:
694  case CmpInst::FCMP_UNE:
695  case CmpInst::FCMP_OLT:
696  case CmpInst::FCMP_OLE:
697  case CmpInst::FCMP_OGT:
698  case CmpInst::FCMP_OGE: {
699 if (UnsupportedFPMode)
700 return false;
701 bool IsFloat = Left->getType()->isFloatTy();
702 bool IsDouble = Left->getType()->isDoubleTy();
703 if (!IsFloat && !IsDouble)
704 return false;
705 unsigned Opc, CondMovOpc;
706 switch (P) {
707    case CmpInst::FCMP_OEQ:
708      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
709 CondMovOpc = Mips::MOVT_I;
710 break;
711    case CmpInst::FCMP_UNE:
712      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
713 CondMovOpc = Mips::MOVF_I;
714 break;
715    case CmpInst::FCMP_OLT:
716      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
717 CondMovOpc = Mips::MOVT_I;
718 break;
719    case CmpInst::FCMP_OLE:
720      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
721 CondMovOpc = Mips::MOVT_I;
722 break;
723    case CmpInst::FCMP_OGT:
724      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
725 CondMovOpc = Mips::MOVF_I;
726 break;
727    case CmpInst::FCMP_OGE:
728      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
729 CondMovOpc = Mips::MOVF_I;
730 break;
731 default:
732 llvm_unreachable("Only switching of a subset of CCs.");
733 }
734 Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
735 Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
736 emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
737 emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
738 emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
739 .addReg(RightReg);
740 emitInst(CondMovOpc, ResultReg)
741 .addReg(RegWithOne)
742 .addReg(Mips::FCC0)
743 .addReg(RegWithZero);
744 break;
745 }
746 }
747 return true;
748}
749
750bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr) {
751 //
752 // more cases will be handled here in following patches.
753 //
754 unsigned Opc;
755 switch (VT.SimpleTy) {
756 case MVT::i32:
757 ResultReg = createResultReg(&Mips::GPR32RegClass);
758 Opc = Mips::LW;
759 break;
760 case MVT::i16:
761 ResultReg = createResultReg(&Mips::GPR32RegClass);
762 Opc = Mips::LHu;
763 break;
764 case MVT::i8:
765 ResultReg = createResultReg(&Mips::GPR32RegClass);
766 Opc = Mips::LBu;
767 break;
768 case MVT::f32:
769 if (UnsupportedFPMode)
770 return false;
771 ResultReg = createResultReg(&Mips::FGR32RegClass);
772 Opc = Mips::LWC1;
773 break;
774 case MVT::f64:
775 if (UnsupportedFPMode)
776 return false;
777 ResultReg = createResultReg(&Mips::AFGR64RegClass);
778 Opc = Mips::LDC1;
779 break;
780 default:
781 return false;
782 }
783 if (Addr.isRegBase()) {
784 simplifyAddress(Addr);
785 emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
786 return true;
787 }
788 if (Addr.isFIBase()) {
789 unsigned FI = Addr.getFI();
790 int64_t Offset = Addr.getOffset();
791 MachineFrameInfo &MFI = MF->getFrameInfo();
792    MachineMemOperand *MMO = MF->getMachineMemOperand(
793        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
794        MFI.getObjectSize(FI), Align(4));
795 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
796 .addFrameIndex(FI)
797 .addImm(Offset)
798 .addMemOperand(MMO);
799 return true;
800 }
801 return false;
802}
803
804bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr) {
805 //
806 // more cases will be handled here in following patches.
807 //
808 unsigned Opc;
809 switch (VT.SimpleTy) {
810 case MVT::i8:
811 Opc = Mips::SB;
812 break;
813 case MVT::i16:
814 Opc = Mips::SH;
815 break;
816 case MVT::i32:
817 Opc = Mips::SW;
818 break;
819 case MVT::f32:
820 if (UnsupportedFPMode)
821 return false;
822 Opc = Mips::SWC1;
823 break;
824 case MVT::f64:
825 if (UnsupportedFPMode)
826 return false;
827 Opc = Mips::SDC1;
828 break;
829 default:
830 return false;
831 }
832 if (Addr.isRegBase()) {
833 simplifyAddress(Addr);
834 emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
835 return true;
836 }
837 if (Addr.isFIBase()) {
838 unsigned FI = Addr.getFI();
839 int64_t Offset = Addr.getOffset();
840 MachineFrameInfo &MFI = MF->getFrameInfo();
841    MachineMemOperand *MMO = MF->getMachineMemOperand(
842        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
843        MFI.getObjectSize(FI), Align(4));
844 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
845 .addReg(SrcReg)
846 .addFrameIndex(FI)
847 .addImm(Offset)
848 .addMemOperand(MMO);
849 return true;
850 }
851 return false;
852}
853
854bool MipsFastISel::selectLogicalOp(const Instruction *I) {
855 MVT VT;
856 if (!isTypeSupported(I->getType(), VT))
857 return false;
858
859 unsigned ResultReg;
860 switch (I->getOpcode()) {
861 default:
862 llvm_unreachable("Unexpected instruction.");
863 case Instruction::And:
864 ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
865 break;
866 case Instruction::Or:
867 ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
868 break;
869 case Instruction::Xor:
870 ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
871 break;
872 }
873
874 if (!ResultReg)
875 return false;
876
877 updateValueMap(I, ResultReg);
878 return true;
879}
880
881bool MipsFastISel::selectLoad(const Instruction *I) {
882 const LoadInst *LI = cast<LoadInst>(I);
883
884 // Atomic loads need special handling.
885 if (LI->isAtomic())
886 return false;
887
888 // Verify we have a legal type before going any further.
889 MVT VT;
890 if (!isLoadTypeLegal(LI->getType(), VT))
891 return false;
892
893 // Underaligned loads need special handling.
894 if (LI->getAlign() < VT.getFixedSizeInBits() / 8 &&
895 !Subtarget->systemSupportsUnalignedAccess())
896 return false;
897
898 // See if we can handle this address.
899  Address Addr;
900  if (!computeAddress(LI->getOperand(0), Addr))
901 return false;
902
903 unsigned ResultReg;
904 if (!emitLoad(VT, ResultReg, Addr))
905 return false;
906 updateValueMap(LI, ResultReg);
907 return true;
908}
909
910bool MipsFastISel::selectStore(const Instruction *I) {
911 const StoreInst *SI = cast<StoreInst>(I);
912
913 Value *Op0 = SI->getOperand(0);
914 unsigned SrcReg = 0;
915
916 // Atomic stores need special handling.
917 if (SI->isAtomic())
918 return false;
919
920 // Verify we have a legal type before going any further.
921 MVT VT;
922 if (!isLoadTypeLegal(SI->getOperand(0)->getType(), VT))
923 return false;
924
925 // Underaligned stores need special handling.
926 if (SI->getAlign() < VT.getFixedSizeInBits() / 8 &&
927 !Subtarget->systemSupportsUnalignedAccess())
928 return false;
929
930 // Get the value to be stored into a register.
931 SrcReg = getRegForValue(Op0);
932 if (SrcReg == 0)
933 return false;
934
935 // See if we can handle this address.
936  Address Addr;
937  if (!computeAddress(SI->getOperand(1), Addr))
938 return false;
939
940 if (!emitStore(VT, SrcReg, Addr))
941 return false;
942 return true;
943}
944
945// This can cause a redundant sltiu to be generated.
946// FIXME: try and eliminate this in a future patch.
947bool MipsFastISel::selectBranch(const Instruction *I) {
948 const BranchInst *BI = cast<BranchInst>(I);
949 MachineBasicBlock *BrBB = FuncInfo.MBB;
950 //
951 // TBB is the basic block for the case where the comparison is true.
952 // FBB is the basic block for the case where the comparison is false.
953 // if (cond) goto TBB
954 // goto FBB
955 // TBB:
956 //
957 MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));
958 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));
959
960 // Fold the common case of a conditional branch with a comparison
961 // in the same block.
962 unsigned ZExtCondReg = 0;
963 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
964 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
965 ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
966 if (!emitCmp(ZExtCondReg, CI))
967 return false;
968 }
969 }
970
971 // For the general case, we need to mask with 1.
972 if (ZExtCondReg == 0) {
973 Register CondReg = getRegForValue(BI->getCondition());
974 if (CondReg == 0)
975 return false;
976
977 ZExtCondReg = emitIntExt(MVT::i1, CondReg, MVT::i32, true);
978 if (ZExtCondReg == 0)
979 return false;
980 }
981
982 BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ))
983 .addReg(ZExtCondReg)
984 .addMBB(TBB);
985 finishCondBranch(BI->getParent(), TBB, FBB);
986 return true;
987}
988
989bool MipsFastISel::selectCmp(const Instruction *I) {
990 const CmpInst *CI = cast<CmpInst>(I);
991 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
992 if (!emitCmp(ResultReg, CI))
993 return false;
994 updateValueMap(I, ResultReg);
995 return true;
996}
997
998// Attempt to fast-select a floating-point extend instruction.
999bool MipsFastISel::selectFPExt(const Instruction *I) {
1000 if (UnsupportedFPMode)
1001 return false;
1002 Value *Src = I->getOperand(0);
1003 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1004 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1005
1006 if (SrcVT != MVT::f32 || DestVT != MVT::f64)
1007 return false;
1008
1009 Register SrcReg =
1010 getRegForValue(Src); // this must be a 32bit floating point register class
1011 // maybe we should handle this differently
1012 if (!SrcReg)
1013 return false;
1014
1015 Register DestReg = createResultReg(&Mips::AFGR64RegClass);
1016 emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
1017 updateValueMap(I, DestReg);
1018 return true;
1019}
1020
1021bool MipsFastISel::selectSelect(const Instruction *I) {
1022 assert(isa<SelectInst>(I) && "Expected a select instruction.");
1023
1024 LLVM_DEBUG(dbgs() << "selectSelect\n");
1025
1026 MVT VT;
1027 if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
1028 LLVM_DEBUG(
1029 dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
1030 return false;
1031 }
1032
1033 unsigned CondMovOpc;
1034 const TargetRegisterClass *RC;
1035
1036 if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
1037 CondMovOpc = Mips::MOVN_I_I;
1038 RC = &Mips::GPR32RegClass;
1039 } else if (VT == MVT::f32) {
1040 CondMovOpc = Mips::MOVN_I_S;
1041 RC = &Mips::FGR32RegClass;
1042 } else if (VT == MVT::f64) {
1043 CondMovOpc = Mips::MOVN_I_D32;
1044 RC = &Mips::AFGR64RegClass;
1045 } else
1046 return false;
1047
1048 const SelectInst *SI = cast<SelectInst>(I);
1049 const Value *Cond = SI->getCondition();
1050 Register Src1Reg = getRegForValue(SI->getTrueValue());
1051 Register Src2Reg = getRegForValue(SI->getFalseValue());
1052 Register CondReg = getRegForValue(Cond);
1053
1054 if (!Src1Reg || !Src2Reg || !CondReg)
1055 return false;
1056
1057 Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
1058 if (!ZExtCondReg)
1059 return false;
1060
1061 if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
1062 return false;
1063
1064 Register ResultReg = createResultReg(RC);
1065 Register TempReg = createResultReg(RC);
1066
1067 if (!ResultReg || !TempReg)
1068 return false;
1069
1070 emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
1071 emitInst(CondMovOpc, ResultReg)
1072 .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
1073 updateValueMap(I, ResultReg);
1074 return true;
1075}
1076
1077// Attempt to fast-select a floating-point truncate instruction.
1078bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1079 if (UnsupportedFPMode)
1080 return false;
1081 Value *Src = I->getOperand(0);
1082 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1083 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1084
1085 if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1086 return false;
1087
1088 Register SrcReg = getRegForValue(Src);
1089 if (!SrcReg)
1090 return false;
1091
1092 Register DestReg = createResultReg(&Mips::FGR32RegClass);
1093 if (!DestReg)
1094 return false;
1095
1096 emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1097 updateValueMap(I, DestReg);
1098 return true;
1099}
1100
1101// Attempt to fast-select a floating-point-to-integer conversion.
1102bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1103 if (UnsupportedFPMode)
1104 return false;
1105 MVT DstVT, SrcVT;
1106 if (!IsSigned)
1107 return false; // We don't handle this case yet. There is no native
1108 // instruction for this but it can be synthesized.
1109 Type *DstTy = I->getType();
1110 if (!isTypeLegal(DstTy, DstVT))
1111 return false;
1112
1113 if (DstVT != MVT::i32)
1114 return false;
1115
1116 Value *Src = I->getOperand(0);
1117 Type *SrcTy = Src->getType();
1118 if (!isTypeLegal(SrcTy, SrcVT))
1119 return false;
1120
1121 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1122 return false;
1123
1124 Register SrcReg = getRegForValue(Src);
1125 if (SrcReg == 0)
1126 return false;
1127
1128 // Determine the opcode for the conversion, which takes place
1129 // entirely within FPRs.
1130 Register DestReg = createResultReg(&Mips::GPR32RegClass);
1131 Register TempReg = createResultReg(&Mips::FGR32RegClass);
1132 unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1133
1134 // Generate the convert.
1135 emitInst(Opc, TempReg).addReg(SrcReg);
1136 emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1137
1138 updateValueMap(I, DestReg);
1139 return true;
1140}
1141
1142bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
1143 SmallVectorImpl<MVT> &OutVTs,
1144 unsigned &NumBytes) {
1145 CallingConv::ID CC = CLI.CallConv;
1146  SmallVector<CCValAssign, 16> ArgLocs;
1147  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
1148 CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
1149 // Get a count of how many bytes are to be pushed on the stack.
1150 NumBytes = CCInfo.getStackSize();
1151 // This is the minimum argument area used for A0-A3.
1152 if (NumBytes < 16)
1153 NumBytes = 16;
1154
1155 emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);
1156 // Process the args.
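  // The loop below implements the O32 oddities: if the first argument is
  // floating point it is assigned F12/D6 (and a second floating-point argument
  // F14/D7), while word-sized arguments placed in the first 16 bytes of the
  // argument area are remapped to A0-A3.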
1157 MVT firstMVT;
1158 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1159 CCValAssign &VA = ArgLocs[i];
1160 const Value *ArgVal = CLI.OutVals[VA.getValNo()];
1161 MVT ArgVT = OutVTs[VA.getValNo()];
1162
1163 if (i == 0) {
1164 firstMVT = ArgVT;
1165 if (ArgVT == MVT::f32) {
1166 VA.convertToReg(Mips::F12);
1167 } else if (ArgVT == MVT::f64) {
1168 if (Subtarget->isFP64bit())
1169 VA.convertToReg(Mips::D6_64);
1170 else
1171 VA.convertToReg(Mips::D6);
1172 }
1173 } else if (i == 1) {
1174 if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
1175 if (ArgVT == MVT::f32) {
1176 VA.convertToReg(Mips::F14);
1177 } else if (ArgVT == MVT::f64) {
1178 if (Subtarget->isFP64bit())
1179 VA.convertToReg(Mips::D7_64);
1180 else
1181 VA.convertToReg(Mips::D7);
1182 }
1183 }
1184 }
1185 if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
1186 (ArgVT == MVT::i8)) &&
1187 VA.isMemLoc()) {
1188 switch (VA.getLocMemOffset()) {
1189 case 0:
1190 VA.convertToReg(Mips::A0);
1191 break;
1192 case 4:
1193 VA.convertToReg(Mips::A1);
1194 break;
1195 case 8:
1196 VA.convertToReg(Mips::A2);
1197 break;
1198 case 12:
1199 VA.convertToReg(Mips::A3);
1200 break;
1201 default:
1202 break;
1203 }
1204 }
1205 Register ArgReg = getRegForValue(ArgVal);
1206 if (!ArgReg)
1207 return false;
1208
1209 // Handle arg promotion: SExt, ZExt, AExt.
1210 switch (VA.getLocInfo()) {
1211 case CCValAssign::Full:
1212 break;
1213 case CCValAssign::AExt:
1214 case CCValAssign::SExt: {
1215 MVT DestVT = VA.getLocVT();
1216 MVT SrcVT = ArgVT;
1217 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
1218 if (!ArgReg)
1219 return false;
1220 break;
1221 }
1222 case CCValAssign::ZExt: {
1223 MVT DestVT = VA.getLocVT();
1224 MVT SrcVT = ArgVT;
1225 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
1226 if (!ArgReg)
1227 return false;
1228 break;
1229 }
1230 default:
1231 llvm_unreachable("Unknown arg promotion!");
1232 }
1233
1234 // Now copy/store arg to correct locations.
1235 if (VA.isRegLoc() && !VA.needsCustom()) {
1236 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1237 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
1238 CLI.OutRegs.push_back(VA.getLocReg());
1239 } else if (VA.needsCustom()) {
1240 llvm_unreachable("Mips does not use custom args.");
1241 return false;
1242 } else {
1243 //
1244 // FIXME: This path will currently return false. It was copied
1245 // from the AArch64 port and should be essentially fine for Mips too.
1246 // The work to finish up this path will be done in a follow-on patch.
1247 //
1248 assert(VA.isMemLoc() && "Assuming store on stack.");
1249 // Don't emit stores for undef values.
1250 if (isa<UndefValue>(ArgVal))
1251 continue;
1252
1253 // Need to store on the stack.
1254 // FIXME: This alignment is incorrect but this path is disabled
1255 // for now (will return false). We need to determine the right alignment
1256 // based on the normal alignment for the underlying machine type.
1257 //
1258 unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
1259
1260 unsigned BEAlign = 0;
1261 if (ArgSize < 8 && !Subtarget->isLittle())
1262 BEAlign = 8 - ArgSize;
1263
1264 Address Addr;
1265 Addr.setKind(Address::RegBase);
1266 Addr.setReg(Mips::SP);
1267 Addr.setOffset(VA.getLocMemOffset() + BEAlign);
1268
1269 Align Alignment = DL.getABITypeAlign(ArgVal->getType());
1270 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
1271 MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
1272 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
1273 (void)(MMO);
1274 // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
1275 return false; // can't store on the stack yet.
1276 }
1277 }
1278
1279 return true;
1280}
1281
1282bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
1283 unsigned NumBytes) {
1284 CallingConv::ID CC = CLI.CallConv;
1285 emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
1286 if (RetVT != MVT::isVoid) {
1287    SmallVector<CCValAssign, 16> RVLocs;
1288    MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
1289
1290 CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
1291 CLI.Symbol ? CLI.Symbol->getName().data()
1292 : nullptr);
1293
1294 // Only handle a single return value.
1295 if (RVLocs.size() != 1)
1296 return false;
1297 // Copy all of the result registers out of their specified physreg.
1298 MVT CopyVT = RVLocs[0].getValVT();
1299 // Special handling for extended integers.
1300 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
1301 CopyVT = MVT::i32;
1302
1303 Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
1304 if (!ResultReg)
1305 return false;
1306 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1307 TII.get(TargetOpcode::COPY),
1308 ResultReg).addReg(RVLocs[0].getLocReg());
1309 CLI.InRegs.push_back(RVLocs[0].getLocReg());
1310
1311 CLI.ResultReg = ResultReg;
1312 CLI.NumResultRegs = 1;
1313 }
1314 return true;
1315}
1316
1317bool MipsFastISel::fastLowerArguments() {
1318 LLVM_DEBUG(dbgs() << "fastLowerArguments\n");
1319
1320 if (!FuncInfo.CanLowerReturn) {
1321 LLVM_DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
1322 return false;
1323 }
1324
1325 const Function *F = FuncInfo.Fn;
1326 if (F->isVarArg()) {
1327 LLVM_DEBUG(dbgs() << ".. gave up (varargs)\n");
1328 return false;
1329 }
1330
1331 CallingConv::ID CC = F->getCallingConv();
1332 if (CC != CallingConv::C) {
1333 LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
1334 return false;
1335 }
1336
1337 std::array<MCPhysReg, 4> GPR32ArgRegs = {{Mips::A0, Mips::A1, Mips::A2,
1338 Mips::A3}};
1339 std::array<MCPhysReg, 2> FGR32ArgRegs = {{Mips::F12, Mips::F14}};
1340 std::array<MCPhysReg, 2> AFGR64ArgRegs = {{Mips::D6, Mips::D7}};
1341 auto NextGPR32 = GPR32ArgRegs.begin();
1342 auto NextFGR32 = FGR32ArgRegs.begin();
1343 auto NextAFGR64 = AFGR64ArgRegs.begin();
1344
1345 struct AllocatedReg {
1346 const TargetRegisterClass *RC;
1347 unsigned Reg;
1348 AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
1349 : RC(RC), Reg(Reg) {}
1350 };
1351
1352 // Only handle simple cases. i.e. All arguments are directly mapped to
1353 // registers of the appropriate type.
1354  SmallVector<AllocatedReg, 4> Allocation;
1355  for (const auto &FormalArg : F->args()) {
1356 if (FormalArg.hasAttribute(Attribute::InReg) ||
1357 FormalArg.hasAttribute(Attribute::StructRet) ||
1358 FormalArg.hasAttribute(Attribute::ByVal)) {
1359 LLVM_DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
1360 return false;
1361 }
1362
1363 Type *ArgTy = FormalArg.getType();
1364 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
1365 LLVM_DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
1366 return false;
1367 }
1368
1369 EVT ArgVT = TLI.getValueType(DL, ArgTy);
1370 LLVM_DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
1371 << ArgVT << "\n");
1372 if (!ArgVT.isSimple()) {
1373 LLVM_DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
1374 return false;
1375 }
1376
1377 switch (ArgVT.getSimpleVT().SimpleTy) {
1378 case MVT::i1:
1379 case MVT::i8:
1380 case MVT::i16:
1381 if (!FormalArg.hasAttribute(Attribute::SExt) &&
1382 !FormalArg.hasAttribute(Attribute::ZExt)) {
1383        // It must be an any-extend; this shouldn't happen for clang-generated
1384        // IR, so just fall back on SelectionDAG.
1385 LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
1386 return false;
1387 }
1388
1389 if (NextGPR32 == GPR32ArgRegs.end()) {
1390 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1391 return false;
1392 }
1393
1394 LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1395 Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1396
1397 // Allocating any GPR32 prohibits further use of floating point arguments.
1398 NextFGR32 = FGR32ArgRegs.end();
1399 NextAFGR64 = AFGR64ArgRegs.end();
1400 break;
1401
1402 case MVT::i32:
1403 if (FormalArg.hasAttribute(Attribute::ZExt)) {
1404 // The O32 ABI does not permit a zero-extended i32.
1405 LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
1406 return false;
1407 }
1408
1409 if (NextGPR32 == GPR32ArgRegs.end()) {
1410 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1411 return false;
1412 }
1413
1414 LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1415 Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1416
1417 // Allocating any GPR32 prohibits further use of floating point arguments.
1418 NextFGR32 = FGR32ArgRegs.end();
1419 NextAFGR64 = AFGR64ArgRegs.end();
1420 break;
1421
1422 case MVT::f32:
1423 if (UnsupportedFPMode) {
1424 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1425 return false;
1426 }
1427 if (NextFGR32 == FGR32ArgRegs.end()) {
1428 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
1429 return false;
1430 }
1431 LLVM_DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
1432 Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
1433 // Allocating an FGR32 also allocates the super-register AFGR64, and
1434 // ABI rules require us to skip the corresponding GPR32.
1435 if (NextGPR32 != GPR32ArgRegs.end())
1436 NextGPR32++;
1437 if (NextAFGR64 != AFGR64ArgRegs.end())
1438 NextAFGR64++;
1439 break;
1440
1441 case MVT::f64:
1442 if (UnsupportedFPMode) {
1443 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1444 return false;
1445 }
1446 if (NextAFGR64 == AFGR64ArgRegs.end()) {
1447 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
1448 return false;
1449 }
1450 LLVM_DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
1451 Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
1452      // Allocating an AFGR64 also allocates the overlapping FGR32 registers,
1453      // and ABI rules require us to skip the corresponding GPR32 pair.
1454 if (NextGPR32 != GPR32ArgRegs.end())
1455 NextGPR32++;
1456 if (NextGPR32 != GPR32ArgRegs.end())
1457 NextGPR32++;
1458 if (NextFGR32 != FGR32ArgRegs.end())
1459 NextFGR32++;
1460 break;
1461
1462 default:
1463 LLVM_DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
1464 return false;
1465 }
1466 }
1467
1468 for (const auto &FormalArg : F->args()) {
1469 unsigned ArgNo = FormalArg.getArgNo();
1470 unsigned SrcReg = Allocation[ArgNo].Reg;
1471 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
1472 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
1473 // Without this, EmitLiveInCopies may eliminate the livein if its only
1474 // use is a bitcast (which isn't turned into an instruction).
1475 Register ResultReg = createResultReg(Allocation[ArgNo].RC);
1476 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1477 TII.get(TargetOpcode::COPY), ResultReg)
1478 .addReg(DstReg, getKillRegState(true));
1479 updateValueMap(&FormalArg, ResultReg);
1480 }
1481
1482 // Calculate the size of the incoming arguments area.
1483 // We currently reject all the cases where this would be non-zero.
1484 unsigned IncomingArgSizeInBytes = 0;
1485
1486 // Account for the reserved argument area on ABI's that have one (O32).
1487 // It seems strange to do this on the caller side but it's necessary in
1488 // SelectionDAG's implementation.
1489 IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
1490 IncomingArgSizeInBytes);
1491
1492 MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
1493 false);
1494
1495 return true;
1496}
1497
1498bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
1499 CallingConv::ID CC = CLI.CallConv;
1500 bool IsTailCall = CLI.IsTailCall;
1501 bool IsVarArg = CLI.IsVarArg;
1502 const Value *Callee = CLI.Callee;
1503 MCSymbol *Symbol = CLI.Symbol;
1504
1505 // Do not handle FastCC.
1506 if (CC == CallingConv::Fast)
1507 return false;
1508
1509 // Allow SelectionDAG isel to handle tail calls.
1510 if (IsTailCall)
1511 return false;
1512
1513 // Let SDISel handle vararg functions.
1514 if (IsVarArg)
1515 return false;
1516
1517 // FIXME: Only handle *simple* calls for now.
1518 MVT RetVT;
1519 if (CLI.RetTy->isVoidTy())
1520 RetVT = MVT::isVoid;
1521 else if (!isTypeSupported(CLI.RetTy, RetVT))
1522 return false;
1523
1524 for (auto Flag : CLI.OutFlags)
1525 if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
1526 return false;
1527
1528 // Set up the argument vectors.
1529 SmallVector<MVT, 16> OutVTs;
1530 OutVTs.reserve(CLI.OutVals.size());
1531
1532 for (auto *Val : CLI.OutVals) {
1533 MVT VT;
1534 if (!isTypeLegal(Val->getType(), VT) &&
1535 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
1536 return false;
1537
1538 // We don't handle vector parameters yet.
1539 if (VT.isVector() || VT.getSizeInBits() > 64)
1540 return false;
1541
1542 OutVTs.push_back(VT);
1543 }
1544
1545 Address Addr;
1546 if (!computeCallAddress(Callee, Addr))
1547 return false;
1548
1549 // Handle the arguments now that we've gotten them.
1550 unsigned NumBytes;
1551 if (!processCallArgs(CLI, OutVTs, NumBytes))
1552 return false;
1553
1554 if (!Addr.getGlobalValue())
1555 return false;
1556
1557 // Issue the call.
1558 unsigned DestAddress;
1559 if (Symbol)
1560 DestAddress = materializeExternalCallSym(Symbol);
1561 else
1562 DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
1563 emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
1564  MachineInstrBuilder MIB =
1565      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR),
1566 Mips::RA).addReg(Mips::T9);
1567
1568 // Add implicit physical register uses to the call.
1569 for (auto Reg : CLI.OutRegs)
1570 MIB.addReg(Reg, RegState::Implicit);
1571
1572 // Add a register mask with the call-preserved registers.
1573 // Proper defs for return values will be added by setPhysRegsDeadExcept().
1574 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
1575
1576 CLI.Call = MIB;
1577
1578 if (EmitJalrReloc && !Subtarget->inMips16Mode()) {
1579 // Attach callee address to the instruction, let asm printer emit
1580 // .reloc R_MIPS_JALR.
1581 if (Symbol)
1582 MIB.addSym(Symbol, MipsII::MO_JALR);
1583 else
1584 MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(
1585 Addr.getGlobalValue()->getName()), MipsII::MO_JALR);
1586 }
1587
1588 // Finish off the call including any return values.
1589 return finishCall(CLI, RetVT, NumBytes);
1590}
1591
1592bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
1593 switch (II->getIntrinsicID()) {
1594 default:
1595 return false;
1596 case Intrinsic::bswap: {
1597 Type *RetTy = II->getCalledFunction()->getReturnType();
1598
1599 MVT VT;
1600 if (!isTypeSupported(RetTy, VT))
1601 return false;
1602
1603 Register SrcReg = getRegForValue(II->getOperand(0));
1604 if (SrcReg == 0)
1605 return false;
1606 Register DestReg = createResultReg(&Mips::GPR32RegClass);
1607 if (DestReg == 0)
1608 return false;
1609 if (VT == MVT::i16) {
1610 if (Subtarget->hasMips32r2()) {
1611 emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
1612 updateValueMap(II, DestReg);
1613 return true;
1614 } else {
1615 unsigned TempReg[3];
1616 for (unsigned &R : TempReg) {
1617 R = createResultReg(&Mips::GPR32RegClass);
1618 if (R == 0)
1619 return false;
1620 }
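        // Swap the two low bytes:
        // DestReg = (SrcReg << 8) | ((SrcReg >> 8) & 0xFF).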
1621 emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
1622 emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
1623 emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[1]).addImm(0xFF);
1624 emitInst(Mips::OR, DestReg).addReg(TempReg[0]).addReg(TempReg[2]);
1625 updateValueMap(II, DestReg);
1626 return true;
1627 }
1628 } else if (VT == MVT::i32) {
1629 if (Subtarget->hasMips32r2()) {
1630 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1631 emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
1632 emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
1633 updateValueMap(II, DestReg);
1634 return true;
1635 } else {
1636 unsigned TempReg[8];
1637 for (unsigned &R : TempReg) {
1638 R = createResultReg(&Mips::GPR32RegClass);
1639 if (R == 0)
1640 return false;
1641 }
1642
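        // With bytes [A B C D] in SrcReg (A = bits 31..24), the shift, mask,
        // and OR sequence below assembles [D C B A] into DestReg.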
1643 emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
1644 emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
1645 emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
1646 emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);
1647
1648 emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
1649 emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);
1650
1651 emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
1652 emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
1653 emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
1654 updateValueMap(II, DestReg);
1655 return true;
1656 }
1657 }
1658 return false;
1659 }
1660 case Intrinsic::memcpy:
1661 case Intrinsic::memmove: {
1662 const auto *MTI = cast<MemTransferInst>(II);
1663 // Don't handle volatile.
1664 if (MTI->isVolatile())
1665 return false;
1666 if (!MTI->getLength()->getType()->isIntegerTy(32))
1667 return false;
1668 const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
1669 return lowerCallTo(II, IntrMemName, II->arg_size() - 1);
1670 }
1671 case Intrinsic::memset: {
1672 const MemSetInst *MSI = cast<MemSetInst>(II);
1673 // Don't handle volatile.
1674 if (MSI->isVolatile())
1675 return false;
1676 if (!MSI->getLength()->getType()->isIntegerTy(32))
1677 return false;
1678 return lowerCallTo(II, "memset", II->arg_size() - 1);
1679 }
1680 }
1681 return false;
1682}
1683
1684bool MipsFastISel::selectRet(const Instruction *I) {
1685 const Function &F = *I->getParent()->getParent();
1686 const ReturnInst *Ret = cast<ReturnInst>(I);
1687
1688 LLVM_DEBUG(dbgs() << "selectRet\n");
1689
1690 if (!FuncInfo.CanLowerReturn)
1691 return false;
1692
1693 // Build a list of return value registers.
1694  SmallVector<unsigned, 4> RetRegs;
1695
1696 if (Ret->getNumOperands() > 0) {
1697 CallingConv::ID CC = F.getCallingConv();
1698
1699 // Do not handle FastCC.
1700 if (CC == CallingConv::Fast)
1701 return false;
1702
1703    SmallVector<ISD::OutputArg, 4> Outs;
1704    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1705
1706 // Analyze operands of the call, assigning locations to each operand.
1707    SmallVector<CCValAssign, 16> ValLocs;
1708    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
1709 I->getContext());
1710 CCAssignFn *RetCC = RetCC_Mips;
1711 CCInfo.AnalyzeReturn(Outs, RetCC);
1712
1713 // Only handle a single return value for now.
1714 if (ValLocs.size() != 1)
1715 return false;
1716
1717 CCValAssign &VA = ValLocs[0];
1718 const Value *RV = Ret->getOperand(0);
1719
1720 // Don't bother handling odd stuff for now.
1721 if ((VA.getLocInfo() != CCValAssign::Full) &&
1722 (VA.getLocInfo() != CCValAssign::BCvt))
1723 return false;
1724
1725 // Only handle register returns for now.
1726 if (!VA.isRegLoc())
1727 return false;
1728
1729 Register Reg = getRegForValue(RV);
1730 if (Reg == 0)
1731 return false;
1732
1733 unsigned SrcReg = Reg + VA.getValNo();
1734 Register DestReg = VA.getLocReg();
1735 // Avoid a cross-class copy. This is very unlikely.
1736 if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1737 return false;
1738
1739 EVT RVEVT = TLI.getValueType(DL, RV->getType());
1740 if (!RVEVT.isSimple())
1741 return false;
1742
1743 if (RVEVT.isVector())
1744 return false;
1745
1746 MVT RVVT = RVEVT.getSimpleVT();
1747 if (RVVT == MVT::f128)
1748 return false;
1749
1750 // Do not handle FGR64 returns for now.
1751 if (RVVT == MVT::f64 && UnsupportedFPMode) {
1752      LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1753 return false;
1754 }
1755
1756 MVT DestVT = VA.getValVT();
1757 // Special handling for extended integers.
1758 if (RVVT != DestVT) {
1759 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1760 return false;
1761
1762 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
1763 bool IsZExt = Outs[0].Flags.isZExt();
1764 SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
1765 if (SrcReg == 0)
1766 return false;
1767 }
1768 }
1769
1770 // Make the copy.
1771 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1772 TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
1773
1774 // Add register to return instruction.
1775 RetRegs.push_back(VA.getLocReg());
1776 }
1777 MachineInstrBuilder MIB = emitInst(Mips::RetRA);
1778 for (unsigned Reg : RetRegs)
1779 MIB.addReg(Reg, RegState::Implicit);
1780 return true;
1781}
1782
1783bool MipsFastISel::selectTrunc(const Instruction *I) {
1784 // The high bits for a type smaller than the register size are assumed to be
1785 // undefined.
1786 Value *Op = I->getOperand(0);
1787
1788 EVT SrcVT, DestVT;
1789 SrcVT = TLI.getValueType(DL, Op->getType(), true);
1790 DestVT = TLI.getValueType(DL, I->getType(), true);
1791
1792 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1793 return false;
1794 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1795 return false;
1796
1797 Register SrcReg = getRegForValue(Op);
1798 if (!SrcReg)
1799 return false;
1800
1801 // Because the high bits are undefined, a truncate doesn't generate
1802 // any code.
1803 updateValueMap(I, SrcReg);
1804 return true;
1805}
1806
1807bool MipsFastISel::selectIntExt(const Instruction *I) {
1808 Type *DestTy = I->getType();
1809 Value *Src = I->getOperand(0);
1810 Type *SrcTy = Src->getType();
1811
1812 bool isZExt = isa<ZExtInst>(I);
1813 Register SrcReg = getRegForValue(Src);
1814 if (!SrcReg)
1815 return false;
1816
1817 EVT SrcEVT, DestEVT;
1818 SrcEVT = TLI.getValueType(DL, SrcTy, true);
1819 DestEVT = TLI.getValueType(DL, DestTy, true);
1820 if (!SrcEVT.isSimple())
1821 return false;
1822 if (!DestEVT.isSimple())
1823 return false;
1824
1825 MVT SrcVT = SrcEVT.getSimpleVT();
1826 MVT DestVT = DestEVT.getSimpleVT();
1827 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1828
1829 if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1830 return false;
1831 updateValueMap(I, ResultReg);
1832 return true;
1833}
1834
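// emitIntSExt32r1: sign extension for pre-MIPS32r2 cores using a shift pair.
// A rough sketch of the expansion for an i8 source (register names are
// illustrative only):
//   sll $t0, $src, 24
//   sra $dst, $t0, 24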
1835bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1836 unsigned DestReg) {
1837 unsigned ShiftAmt;
1838 switch (SrcVT.SimpleTy) {
1839 default:
1840 return false;
1841 case MVT::i8:
1842 ShiftAmt = 24;
1843 break;
1844 case MVT::i16:
1845 ShiftAmt = 16;
1846 break;
1847 }
1848 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1849 emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1850 emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1851 return true;
1852}
1853
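// emitIntSExt32r2: MIPS32r2 and later provide SEB/SEH, which sign-extend a
// byte or halfword with a single instruction, so no shift pair is needed.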
1854bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1855 unsigned DestReg) {
1856 switch (SrcVT.SimpleTy) {
1857 default:
1858 return false;
1859 case MVT::i8:
1860 emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1861 break;
1862 case MVT::i16:
1863 emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1864 break;
1865 }
1866 return true;
1867}
1868
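// emitIntSExt: picks the SEB/SEH form when the subtarget has MIPS32r2,
// otherwise falls back to the shift-pair sequence above.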
1869bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1870 unsigned DestReg) {
1871 if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1872 return false;
1873 if (Subtarget->hasMips32r2())
1874 return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1875 return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1876}
1877
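// emitIntZExt: zero extension is a single ANDi with the mask matching the
// source width, e.g. (illustrative) "andi $dst, $src, 0xff" for an i8 source.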
1878bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1879 unsigned DestReg) {
1880 int64_t Imm;
1881
1882 switch (SrcVT.SimpleTy) {
1883 default:
1884 return false;
1885 case MVT::i1:
1886 Imm = 1;
1887 break;
1888 case MVT::i8:
1889 Imm = 0xff;
1890 break;
1891 case MVT::i16:
1892 Imm = 0xffff;
1893 break;
1894 }
1895
1896 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1897 return true;
1898}
1899
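// emitIntExt: common entry point that validates the source and destination
// types and then dispatches to the zero- or sign-extension emitter above.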
1900bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1901 unsigned DestReg, bool IsZExt) {
1902 // FastISel does not have plumbing to deal with extensions where the SrcVT or
1903 // DestVT are unusual types, so check that both are types we can handle
1904 // (i1/i8/i16 for SrcVT and i8/i16/i32 for DestVT); otherwise bail out to
1905 // SelectionDAG.
1906 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1907 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1908 return false;
1909 if (IsZExt)
1910 return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1911 return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1912}
1913
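// Register-returning overload of emitIntExt: allocates the destination GPR32
// itself and returns 0 on failure, matching FastISel's convention that a zero
// register means "no result".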
1914unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1915 bool isZExt) {
1916 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1917 bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1918 return Success ? DestReg : 0;
1919}
1920
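// selectDivRem: i32 division and remainder. A rough sketch of what is emitted
// (register names illustrative; 7 is the usual divide-by-zero trap code):
//   div  $src0, $src1     # quotient -> LO, remainder -> HI
//   teq  $src1, $zero, 7  # trap if the divisor is zero
//   mflo $dst             # mfhi for srem/urem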
1921bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1922 EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1923 if (!DestEVT.isSimple())
1924 return false;
1925
1926 MVT DestVT = DestEVT.getSimpleVT();
1927 if (DestVT != MVT::i32)
1928 return false;
1929
1930 unsigned DivOpc;
1931 switch (ISDOpcode) {
1932 default:
1933 return false;
1934 case ISD::SDIV:
1935 case ISD::SREM:
1936 DivOpc = Mips::SDIV;
1937 break;
1938 case ISD::UDIV:
1939 case ISD::UREM:
1940 DivOpc = Mips::UDIV;
1941 break;
1942 }
1943
1944 Register Src0Reg = getRegForValue(I->getOperand(0));
1945 Register Src1Reg = getRegForValue(I->getOperand(1));
1946 if (!Src0Reg || !Src1Reg)
1947 return false;
1948
1949 emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
1950 emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1951
1952 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1953 if (!ResultReg)
1954 return false;
1955
1956 unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1957 ? Mips::MFHI
1958 : Mips::MFLO;
1959 emitInst(MFOpc, ResultReg);
1960
1961 updateValueMap(I, ResultReg);
1962 return true;
1963}
1964
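// selectShift: constant shift amounts map to SLL/SRA/SRL with an immediate,
// variable amounts to SLLV/SRAV/SRLV; for right shifts the shifted operand is
// first extended to i32, and selection is rejected if that fails.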
1965bool MipsFastISel::selectShift(const Instruction *I) {
1966 MVT RetVT;
1967
1968 if (!isTypeSupported(I->getType(), RetVT))
1969 return false;
1970
1971 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1972 if (!ResultReg)
1973 return false;
1974
1975 unsigned Opcode = I->getOpcode();
1976 const Value *Op0 = I->getOperand(0);
1977 Register Op0Reg = getRegForValue(Op0);
1978 if (!Op0Reg)
1979 return false;
1980
1981 // For AShr or LShr, operand 0 must first be extended to i32:
1982 // sign-extended for ashr, zero-extended for lshr.
1982 if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1983 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1984 if (!TempReg)
1985 return false;
1986
1987 MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1988 bool IsZExt = Opcode == Instruction::LShr;
1989 if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
1990 return false;
1991
1992 Op0Reg = TempReg;
1993 }
1994
1995 if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
1996 uint64_t ShiftVal = C->getZExtValue();
1997
1998 switch (Opcode) {
1999 default:
2000 llvm_unreachable("Unexpected instruction.");
2001 case Instruction::Shl:
2002 Opcode = Mips::SLL;
2003 break;
2004 case Instruction::AShr:
2005 Opcode = Mips::SRA;
2006 break;
2007 case Instruction::LShr:
2008 Opcode = Mips::SRL;
2009 break;
2010 }
2011
2012 emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
2013 updateValueMap(I, ResultReg);
2014 return true;
2015 }
2016
2017 Register Op1Reg = getRegForValue(I->getOperand(1));
2018 if (!Op1Reg)
2019 return false;
2020
2021 switch (Opcode) {
2022 default:
2023 llvm_unreachable("Unexpected instruction.");
2024 case Instruction::Shl:
2025 Opcode = Mips::SLLV;
2026 break;
2027 case Instruction::AShr:
2028 Opcode = Mips::SRAV;
2029 break;
2030 case Instruction::LShr:
2031 Opcode = Mips::SRLV;
2032 break;
2033 }
2034
2035 emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
2036 updateValueMap(I, ResultReg);
2037 return true;
2038}
2039
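// fastSelectInstruction: top-level dispatch for this target. For div/rem the
// generic selectBinaryOp is tried first with selectDivRem as the fallback;
// returning false for any opcode hands the instruction back to SelectionDAG.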
2040bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
2041 switch (I->getOpcode()) {
2042 default:
2043 break;
2044 case Instruction::Load:
2045 return selectLoad(I);
2046 case Instruction::Store:
2047 return selectStore(I);
2048 case Instruction::SDiv:
2049 if (!selectBinaryOp(I, ISD::SDIV))
2050 return selectDivRem(I, ISD::SDIV);
2051 return true;
2052 case Instruction::UDiv:
2053 if (!selectBinaryOp(I, ISD::UDIV))
2054 return selectDivRem(I, ISD::UDIV);
2055 return true;
2056 case Instruction::SRem:
2057 if (!selectBinaryOp(I, ISD::SREM))
2058 return selectDivRem(I, ISD::SREM);
2059 return true;
2060 case Instruction::URem:
2061 if (!selectBinaryOp(I, ISD::UREM))
2062 return selectDivRem(I, ISD::UREM);
2063 return true;
2064 case Instruction::Shl:
2065 case Instruction::LShr:
2066 case Instruction::AShr:
2067 return selectShift(I);
2068 case Instruction::And:
2069 case Instruction::Or:
2070 case Instruction::Xor:
2071 return selectLogicalOp(I);
2072 case Instruction::Br:
2073 return selectBranch(I);
2074 case Instruction::Ret:
2075 return selectRet(I);
2076 case Instruction::Trunc:
2077 return selectTrunc(I);
2078 case Instruction::ZExt:
2079 case Instruction::SExt:
2080 return selectIntExt(I);
2081 case Instruction::FPTrunc:
2082 return selectFPTrunc(I);
2083 case Instruction::FPExt:
2084 return selectFPExt(I);
2085 case Instruction::FPToSI:
2086 return selectFPToInt(I, /*isSigned*/ true);
2087 case Instruction::FPToUI:
2088 return selectFPToInt(I, /*isSigned*/ false);
2089 case Instruction::ICmp:
2090 case Instruction::FCmp:
2091 return selectCmp(I);
2092 case Instruction::Select:
2093 return selectSelect(I);
2094 }
2095 return false;
2096}
2097
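// getRegEnsuringSimpleIntegerWidening: materializes V and, if it is an i8 or
// i16, widens it to i32 (zero- or sign-extended per IsUnsigned); i1 values
// are rejected by returning 0.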
2098unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2099 bool IsUnsigned) {
2100 Register VReg = getRegForValue(V);
2101 if (VReg == 0)
2102 return 0;
2103 MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2104
2105 if (VMVT == MVT::i1)
2106 return 0;
2107
2108 if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2109 Register TempReg = createResultReg(&Mips::GPR32RegClass);
2110 if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2111 return 0;
2112 VReg = TempReg;
2113 }
2114 return VReg;
2115}
2116
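// simplifyAddress: MIPS load/store offsets are signed 16-bit immediates, so a
// larger offset is materialized into a register and folded into the base with
// ADDu, leaving a zero offset.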
2117void MipsFastISel::simplifyAddress(Address &Addr) {
2118 if (!isInt<16>(Addr.getOffset())) {
2119 unsigned TempReg =
2120 materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2121 Register DestReg = createResultReg(&Mips::GPR32RegClass);
2122 emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2123 Addr.setReg(DestReg);
2124 Addr.setOffset(0);
2125 }
2126}
2127
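// fastEmitInst_rr override: only MUL needs special treatment here; anything
// else falls through to the generic FastISel implementation.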
2128unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2129 const TargetRegisterClass *RC,
2130 unsigned Op0, unsigned Op1) {
2131 // We treat the MUL instruction in a special way because it clobbers
2132 // the HI0 & LO0 registers. The TableGen definition of this instruction can
2133 // mark these registers only as implicitly defined. As a result, the
2134 // register allocator runs out of registers when this instruction is
2135 // followed by another instruction that defines the same registers too.
2136 // We can fix this by explicitly marking those registers as dead.
2137 if (MachineInstOpcode == Mips::MUL) {
2138 Register ResultReg = createResultReg(RC);
2139 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2140 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2141 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2142 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2143 .addReg(Op0)
2144 .addReg(Op1)
2145 .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
2146 .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
2147 return ResultReg;
2148 }
2149
2150 return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1);
2151}
2152
2153namespace llvm {
2154
2155 FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
2156 const TargetLibraryInfo *libInfo) {
2157 return new MipsFastISel(funcInfo, libInfo);
2158}
2159
2160} // end namespace llvm