// MipsFastISel.cpp — LLVM source file (doxygen page-navigation residue removed).
//===- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the MIPS-specific support for the FastISel class.
/// Some of the target-specific code is generated by tablegen in the file
/// MipsGenFastISel.inc, which is #included here.
///
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsABIInfo.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsCCState.h"
#include "MipsISelLowering.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

72#define DEBUG_TYPE "mips-fastisel"
73
74using namespace llvm;
75
77
78namespace {
79
80class MipsFastISel final : public FastISel {
81
82 // All possible address modes.
83 class Address {
84 public:
85 enum BaseKind { RegBase, FrameIndexBase };
86
87 private:
88 BaseKind Kind = RegBase;
89 union {
90 unsigned Reg;
91 int FI;
92 } Base;
93
94 int64_t Offset = 0;
95
96 const GlobalValue *GV = nullptr;
97
98 public:
99 // Innocuous defaults for our address.
100 Address() { Base.Reg = 0; }
101
102 void setKind(BaseKind K) { Kind = K; }
103 BaseKind getKind() const { return Kind; }
104 bool isRegBase() const { return Kind == RegBase; }
105 bool isFIBase() const { return Kind == FrameIndexBase; }
106
107 void setReg(unsigned Reg) {
108 assert(isRegBase() && "Invalid base register access!");
109 Base.Reg = Reg;
110 }
111
112 unsigned getReg() const {
113 assert(isRegBase() && "Invalid base register access!");
114 return Base.Reg;
115 }
116
117 void setFI(unsigned FI) {
118 assert(isFIBase() && "Invalid base frame index access!");
119 Base.FI = FI;
120 }
121
122 unsigned getFI() const {
123 assert(isFIBase() && "Invalid base frame index access!");
124 return Base.FI;
125 }
126
127 void setOffset(int64_t Offset_) { Offset = Offset_; }
128 int64_t getOffset() const { return Offset; }
129 void setGlobalValue(const GlobalValue *G) { GV = G; }
130 const GlobalValue *getGlobalValue() { return GV; }
131 };
132
133 /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
134 /// make the right decision when generating code for different targets.
135 const TargetMachine &TM;
136 const MipsSubtarget *Subtarget;
137 const TargetInstrInfo &TII;
138 const TargetLowering &TLI;
139 MipsFunctionInfo *MFI;
140
141 // Convenience variables to avoid some queries.
142 LLVMContext *Context;
143
144 bool fastLowerArguments() override;
145 bool fastLowerCall(CallLoweringInfo &CLI) override;
146 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
147
148 bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
149 // floating point but not reject doing fast-isel in other
150 // situations
151
152private:
153 // Selection routines.
154 bool selectLogicalOp(const Instruction *I);
155 bool selectLoad(const Instruction *I);
156 bool selectStore(const Instruction *I);
157 bool selectBranch(const Instruction *I);
158 bool selectSelect(const Instruction *I);
159 bool selectCmp(const Instruction *I);
160 bool selectFPExt(const Instruction *I);
161 bool selectFPTrunc(const Instruction *I);
162 bool selectFPToInt(const Instruction *I, bool IsSigned);
163 bool selectRet(const Instruction *I);
164 bool selectTrunc(const Instruction *I);
165 bool selectIntExt(const Instruction *I);
166 bool selectShift(const Instruction *I);
167 bool selectDivRem(const Instruction *I, unsigned ISDOpcode);
168
169 // Utility helper routines.
170 bool isTypeLegal(Type *Ty, MVT &VT);
171 bool isTypeSupported(Type *Ty, MVT &VT);
172 bool isLoadTypeLegal(Type *Ty, MVT &VT);
173 bool computeAddress(const Value *Obj, Address &Addr);
174 bool computeCallAddress(const Value *V, Address &Addr);
175 void simplifyAddress(Address &Addr);
176
177 // Emit helper routines.
178 bool emitCmp(unsigned DestReg, const CmpInst *CI);
179 bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr);
180 bool emitStore(MVT VT, unsigned SrcReg, Address &Addr);
181 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
182 bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
183
184 bool IsZExt);
185 bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
186
187 bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
188 bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
189 unsigned DestReg);
190 bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
191 unsigned DestReg);
192
193 unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
194
195 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
196 const Value *RHS);
197
198 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
199 unsigned materializeGV(const GlobalValue *GV, MVT VT);
200 unsigned materializeInt(const Constant *C, MVT VT);
201 unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
202 unsigned materializeExternalCallSym(MCSymbol *Syn);
203
204 MachineInstrBuilder emitInst(unsigned Opc) {
205 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
206 }
207
208 MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
209 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
210 DstReg);
211 }
212
213 MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
214 unsigned MemReg, int64_t MemOffset) {
215 return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
216 }
217
218 MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
219 unsigned MemReg, int64_t MemOffset) {
220 return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
221 }
222
223 unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
224 const TargetRegisterClass *RC,
225 unsigned Op0, unsigned Op1);
226
227 // for some reason, this default is not generated by tablegen
228 // so we explicitly generate it here.
229 unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
230 unsigned Op0, uint64_t imm1, uint64_t imm2,
231 unsigned Op3) {
232 return 0;
233 }
234
235 // Call handling routines.
236private:
237 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
238 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
239 unsigned &NumBytes);
240 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
241
242 const MipsABIInfo &getABI() const {
243 return static_cast<const MipsTargetMachine &>(TM).getABI();
244 }
245
246public:
247 // Backend specific FastISel code.
248 explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
249 const TargetLibraryInfo *libInfo,
250 const LibcallLoweringInfo *libcallLowering)
251 : FastISel(funcInfo, libInfo, libcallLowering),
252 TM(funcInfo.MF->getTarget()),
253 Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
254 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
255 MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
256 Context = &funcInfo.Fn->getContext();
257 UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
258 }
259
260 Register fastMaterializeAlloca(const AllocaInst *AI) override;
261 Register fastMaterializeConstant(const Constant *C) override;
262 bool fastSelectInstruction(const Instruction *I) override;
263
264#include "MipsGenFastISel.inc"
265};
266
267} // end anonymous namespace
268
269[[maybe_unused]] static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
270 CCValAssign::LocInfo LocInfo,
271 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
272 CCState &State);
273
274static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
275 CCValAssign::LocInfo LocInfo,
276 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
277 CCState &State) {
278 llvm_unreachable("should not be called");
279}
280
281static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
282 CCValAssign::LocInfo LocInfo,
283 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
284 CCState &State) {
285 llvm_unreachable("should not be called");
286}
287
288#include "MipsGenCallingConv.inc"
289
290CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
291 return CC_MipsO32;
292}
293
294unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
295 const Value *LHS, const Value *RHS) {
296 // Canonicalize immediates to the RHS first.
298 std::swap(LHS, RHS);
299
300 unsigned Opc;
301 switch (ISDOpc) {
302 case ISD::AND:
303 Opc = Mips::AND;
304 break;
305 case ISD::OR:
306 Opc = Mips::OR;
307 break;
308 case ISD::XOR:
309 Opc = Mips::XOR;
310 break;
311 default:
312 llvm_unreachable("unexpected opcode");
313 }
314
315 Register LHSReg = getRegForValue(LHS);
316 if (!LHSReg)
317 return 0;
318
319 unsigned RHSReg;
320 if (const auto *C = dyn_cast<ConstantInt>(RHS))
321 RHSReg = materializeInt(C, MVT::i32);
322 else
323 RHSReg = getRegForValue(RHS);
324 if (!RHSReg)
325 return 0;
326
327 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
328 if (!ResultReg)
329 return 0;
330
331 emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
332 return ResultReg;
333}
334
335Register MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
336 assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
337 "Alloca should always return a pointer.");
338
339 DenseMap<const AllocaInst *, int>::iterator SI =
340 FuncInfo.StaticAllocaMap.find(AI);
341
342 if (SI != FuncInfo.StaticAllocaMap.end()) {
343 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
344 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu),
345 ResultReg)
346 .addFrameIndex(SI->second)
347 .addImm(0);
348 return ResultReg;
349 }
350
351 return Register();
352}
353
354unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
355 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
356 return 0;
357 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
358 const ConstantInt *CI = cast<ConstantInt>(C);
359 return materialize32BitInt(CI->getZExtValue(), RC);
360}
361
362unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
363 const TargetRegisterClass *RC) {
364 Register ResultReg = createResultReg(RC);
365
366 if (isInt<16>(Imm)) {
367 unsigned Opc = Mips::ADDiu;
368 emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
369 return ResultReg;
370 } else if (isUInt<16>(Imm)) {
371 emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
372 return ResultReg;
373 }
374 unsigned Lo = Imm & 0xFFFF;
375 unsigned Hi = (Imm >> 16) & 0xFFFF;
376 if (Lo) {
377 // Both Lo and Hi have nonzero bits.
378 Register TmpReg = createResultReg(RC);
379 emitInst(Mips::LUi, TmpReg).addImm(Hi);
380 emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
381 } else {
382 emitInst(Mips::LUi, ResultReg).addImm(Hi);
383 }
384 return ResultReg;
385}
386
387unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
388 if (UnsupportedFPMode)
389 return 0;
390 int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
391 if (VT == MVT::f32) {
392 const TargetRegisterClass *RC = &Mips::FGR32RegClass;
393 Register DestReg = createResultReg(RC);
394 unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
395 emitInst(Mips::MTC1, DestReg).addReg(TempReg);
396 return DestReg;
397 } else if (VT == MVT::f64) {
398 const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
399 Register DestReg = createResultReg(RC);
400 unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
401 unsigned TempReg2 =
402 materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
403 emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
404 return DestReg;
405 }
406 return 0;
407}
408
409unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
410 // For now 32-bit only.
411 if (VT != MVT::i32)
412 return 0;
413 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
414 Register DestReg = createResultReg(RC);
415 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
416 bool IsThreadLocal = GVar && GVar->isThreadLocal();
417 // TLS not supported at this time.
418 if (IsThreadLocal)
419 return 0;
420 emitInst(Mips::LW, DestReg)
421 .addReg(MFI->getGlobalBaseReg(*MF))
422 .addGlobalAddress(GV, 0, MipsII::MO_GOT);
423 if ((GV->hasInternalLinkage() ||
424 (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
425 Register TempReg = createResultReg(RC);
426 emitInst(Mips::ADDiu, TempReg)
427 .addReg(DestReg)
428 .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
429 DestReg = TempReg;
430 }
431 return DestReg;
432}
433
434unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
435 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
436 Register DestReg = createResultReg(RC);
437 emitInst(Mips::LW, DestReg)
438 .addReg(MFI->getGlobalBaseReg(*MF))
439 .addSym(Sym, MipsII::MO_GOT);
440 return DestReg;
441}
442
443// Materialize a constant into a register, and return the register
444// number (or zero if we failed to handle it).
445Register MipsFastISel::fastMaterializeConstant(const Constant *C) {
446 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
447
448 // Only handle simple types.
449 if (!CEVT.isSimple())
450 return Register();
451 MVT VT = CEVT.getSimpleVT();
452
453 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
454 return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
455 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
456 return materializeGV(GV, VT);
457 else if (isa<ConstantInt>(C))
458 return materializeInt(C, VT);
459
460 return Register();
461}
462
463bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
464 const User *U = nullptr;
465 unsigned Opcode = Instruction::UserOp1;
466 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
467 // Don't walk into other basic blocks unless the object is an alloca from
468 // another block, otherwise it may not have a virtual register assigned.
469 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
470 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
471 Opcode = I->getOpcode();
472 U = I;
473 }
474 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
475 Opcode = C->getOpcode();
476 U = C;
477 }
478 switch (Opcode) {
479 default:
480 break;
481 case Instruction::BitCast:
482 // Look through bitcasts.
483 return computeAddress(U->getOperand(0), Addr);
484 case Instruction::GetElementPtr: {
485 Address SavedAddr = Addr;
486 int64_t TmpOffset = Addr.getOffset();
487 // Iterate through the GEP folding the constants into offsets where
488 // we can.
490 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
491 ++i, ++GTI) {
492 const Value *Op = *i;
493 if (StructType *STy = GTI.getStructTypeOrNull()) {
494 const StructLayout *SL = DL.getStructLayout(STy);
495 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
496 TmpOffset += SL->getElementOffset(Idx);
497 } else {
498 uint64_t S = GTI.getSequentialElementStride(DL);
499 while (true) {
500 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
501 // Constant-offset addressing.
502 TmpOffset += CI->getSExtValue() * S;
503 break;
504 }
505 if (canFoldAddIntoGEP(U, Op)) {
506 // A compatible add with a constant operand. Fold the constant.
507 ConstantInt *CI =
508 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
509 TmpOffset += CI->getSExtValue() * S;
510 // Iterate on the other operand.
511 Op = cast<AddOperator>(Op)->getOperand(0);
512 continue;
513 }
514 // Unsupported
515 goto unsupported_gep;
516 }
517 }
518 }
519 // Try to grab the base operand now.
520 Addr.setOffset(TmpOffset);
521 if (computeAddress(U->getOperand(0), Addr))
522 return true;
523 // We failed, restore everything and try the other options.
524 Addr = SavedAddr;
525 unsupported_gep:
526 break;
527 }
528 case Instruction::Alloca: {
529 const AllocaInst *AI = cast<AllocaInst>(Obj);
530 DenseMap<const AllocaInst *, int>::iterator SI =
531 FuncInfo.StaticAllocaMap.find(AI);
532 if (SI != FuncInfo.StaticAllocaMap.end()) {
533 Addr.setKind(Address::FrameIndexBase);
534 Addr.setFI(SI->second);
535 return true;
536 }
537 break;
538 }
539 }
540 Addr.setReg(getRegForValue(Obj));
541 return Addr.getReg() != 0;
542}
543
544bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
545 const User *U = nullptr;
546 unsigned Opcode = Instruction::UserOp1;
547
548 if (const auto *I = dyn_cast<Instruction>(V)) {
549 // Check if the value is defined in the same basic block. This information
550 // is crucial to know whether or not folding an operand is valid.
551 if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
552 Opcode = I->getOpcode();
553 U = I;
554 }
555 } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
556 Opcode = C->getOpcode();
557 U = C;
558 }
559
560 switch (Opcode) {
561 default:
562 break;
563 case Instruction::BitCast:
564 // Look past bitcasts if its operand is in the same BB.
565 return computeCallAddress(U->getOperand(0), Addr);
566 break;
567 case Instruction::IntToPtr:
568 // Look past no-op inttoptrs if its operand is in the same BB.
569 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
570 TLI.getPointerTy(DL))
571 return computeCallAddress(U->getOperand(0), Addr);
572 break;
573 case Instruction::PtrToInt:
574 // Look past no-op ptrtoints if its operand is in the same BB.
575 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
576 return computeCallAddress(U->getOperand(0), Addr);
577 break;
578 }
579
580 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
581 Addr.setGlobalValue(GV);
582 return true;
583 }
584
585 // If all else fails, try to materialize the value in a register.
586 if (!Addr.getGlobalValue()) {
587 Addr.setReg(getRegForValue(V));
588 return Addr.getReg() != 0;
589 }
590
591 return false;
592}
593
594bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
595 EVT evt = TLI.getValueType(DL, Ty, true);
596 // Only handle simple types.
597 if (evt == MVT::Other || !evt.isSimple())
598 return false;
599 VT = evt.getSimpleVT();
600
601 // Handle all legal types, i.e. a register that will directly hold this
602 // value.
603 return TLI.isTypeLegal(VT);
604}
605
606bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
607 if (Ty->isVectorTy())
608 return false;
609
610 if (isTypeLegal(Ty, VT))
611 return true;
612
613 // If this is a type than can be sign or zero-extended to a basic operation
614 // go ahead and accept it now.
615 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
616 return true;
617
618 return false;
619}
620
621bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
622 if (isTypeLegal(Ty, VT))
623 return true;
624 // We will extend this in a later patch:
625 // If this is a type than can be sign or zero-extended to a basic operation
626 // go ahead and accept it now.
627 if (VT == MVT::i8 || VT == MVT::i16)
628 return true;
629 return false;
630}
631
632// Because of how EmitCmp is called with fast-isel, you can
633// end up with redundant "andi" instructions after the sequences emitted below.
634// We should try and solve this issue in the future.
635//
636bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
637 const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
638 bool IsUnsigned = CI->isUnsigned();
639 unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
640 if (LeftReg == 0)
641 return false;
642 unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
643 if (RightReg == 0)
644 return false;
646
647 switch (P) {
648 default:
649 return false;
650 case CmpInst::ICMP_EQ: {
651 Register TempReg = createResultReg(&Mips::GPR32RegClass);
652 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
653 emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
654 break;
655 }
656 case CmpInst::ICMP_NE: {
657 Register TempReg = createResultReg(&Mips::GPR32RegClass);
658 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
659 emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
660 break;
661 }
663 emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
664 break;
666 emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
667 break;
668 case CmpInst::ICMP_UGE: {
669 Register TempReg = createResultReg(&Mips::GPR32RegClass);
670 emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
671 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
672 break;
673 }
674 case CmpInst::ICMP_ULE: {
675 Register TempReg = createResultReg(&Mips::GPR32RegClass);
676 emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
677 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
678 break;
679 }
681 emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
682 break;
684 emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
685 break;
686 case CmpInst::ICMP_SGE: {
687 Register TempReg = createResultReg(&Mips::GPR32RegClass);
688 emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
689 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
690 break;
691 }
692 case CmpInst::ICMP_SLE: {
693 Register TempReg = createResultReg(&Mips::GPR32RegClass);
694 emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
695 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
696 break;
697 }
703 case CmpInst::FCMP_OGE: {
704 if (UnsupportedFPMode)
705 return false;
706 bool IsFloat = Left->getType()->isFloatTy();
707 bool IsDouble = Left->getType()->isDoubleTy();
708 if (!IsFloat && !IsDouble)
709 return false;
710 unsigned Opc, CondMovOpc;
711 switch (P) {
713 Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
714 CondMovOpc = Mips::MOVT_I;
715 break;
717 Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
718 CondMovOpc = Mips::MOVF_I;
719 break;
721 Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
722 CondMovOpc = Mips::MOVT_I;
723 break;
725 Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
726 CondMovOpc = Mips::MOVT_I;
727 break;
729 Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
730 CondMovOpc = Mips::MOVF_I;
731 break;
733 Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
734 CondMovOpc = Mips::MOVF_I;
735 break;
736 default:
737 llvm_unreachable("Only switching of a subset of CCs.");
738 }
739 Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
740 Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
741 emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
742 emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
743 emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
744 .addReg(RightReg);
745 emitInst(CondMovOpc, ResultReg)
746 .addReg(RegWithOne)
747 .addReg(Mips::FCC0)
748 .addReg(RegWithZero);
749 break;
750 }
751 }
752 return true;
753}
754
755bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr) {
756 //
757 // more cases will be handled here in following patches.
758 //
759 unsigned Opc;
760 switch (VT.SimpleTy) {
761 case MVT::i32:
762 ResultReg = createResultReg(&Mips::GPR32RegClass);
763 Opc = Mips::LW;
764 break;
765 case MVT::i16:
766 ResultReg = createResultReg(&Mips::GPR32RegClass);
767 Opc = Mips::LHu;
768 break;
769 case MVT::i8:
770 ResultReg = createResultReg(&Mips::GPR32RegClass);
771 Opc = Mips::LBu;
772 break;
773 case MVT::f32:
774 if (UnsupportedFPMode)
775 return false;
776 ResultReg = createResultReg(&Mips::FGR32RegClass);
777 Opc = Mips::LWC1;
778 break;
779 case MVT::f64:
780 if (UnsupportedFPMode)
781 return false;
782 ResultReg = createResultReg(&Mips::AFGR64RegClass);
783 Opc = Mips::LDC1;
784 break;
785 default:
786 return false;
787 }
788 if (Addr.isRegBase()) {
789 simplifyAddress(Addr);
790 emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
791 return true;
792 }
793 if (Addr.isFIBase()) {
794 unsigned FI = Addr.getFI();
795 int64_t Offset = Addr.getOffset();
796 MachineFrameInfo &MFI = MF->getFrameInfo();
797 MachineMemOperand *MMO = MF->getMachineMemOperand(
799 MFI.getObjectSize(FI), Align(4));
800 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
801 .addFrameIndex(FI)
802 .addImm(Offset)
803 .addMemOperand(MMO);
804 return true;
805 }
806 return false;
807}
808
809bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr) {
810 //
811 // more cases will be handled here in following patches.
812 //
813 unsigned Opc;
814 switch (VT.SimpleTy) {
815 case MVT::i8:
816 Opc = Mips::SB;
817 break;
818 case MVT::i16:
819 Opc = Mips::SH;
820 break;
821 case MVT::i32:
822 Opc = Mips::SW;
823 break;
824 case MVT::f32:
825 if (UnsupportedFPMode)
826 return false;
827 Opc = Mips::SWC1;
828 break;
829 case MVT::f64:
830 if (UnsupportedFPMode)
831 return false;
832 Opc = Mips::SDC1;
833 break;
834 default:
835 return false;
836 }
837 if (Addr.isRegBase()) {
838 simplifyAddress(Addr);
839 emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
840 return true;
841 }
842 if (Addr.isFIBase()) {
843 unsigned FI = Addr.getFI();
844 int64_t Offset = Addr.getOffset();
845 MachineFrameInfo &MFI = MF->getFrameInfo();
846 MachineMemOperand *MMO = MF->getMachineMemOperand(
848 MFI.getObjectSize(FI), Align(4));
849 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
850 .addReg(SrcReg)
851 .addFrameIndex(FI)
852 .addImm(Offset)
853 .addMemOperand(MMO);
854 return true;
855 }
856 return false;
857}
858
859bool MipsFastISel::selectLogicalOp(const Instruction *I) {
860 MVT VT;
861 if (!isTypeSupported(I->getType(), VT))
862 return false;
863
864 unsigned ResultReg;
865 switch (I->getOpcode()) {
866 default:
867 llvm_unreachable("Unexpected instruction.");
868 case Instruction::And:
869 ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
870 break;
871 case Instruction::Or:
872 ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
873 break;
874 case Instruction::Xor:
875 ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
876 break;
877 }
878
879 if (!ResultReg)
880 return false;
881
882 updateValueMap(I, ResultReg);
883 return true;
884}
885
886bool MipsFastISel::selectLoad(const Instruction *I) {
887 const LoadInst *LI = cast<LoadInst>(I);
888
889 // Atomic loads need special handling.
890 if (LI->isAtomic())
891 return false;
892
893 // Verify we have a legal type before going any further.
894 MVT VT;
895 if (!isLoadTypeLegal(LI->getType(), VT))
896 return false;
897
898 // Underaligned loads need special handling.
899 if (LI->getAlign() < VT.getFixedSizeInBits() / 8 &&
900 !Subtarget->systemSupportsUnalignedAccess())
901 return false;
902
903 // See if we can handle this address.
904 Address Addr;
905 if (!computeAddress(LI->getOperand(0), Addr))
906 return false;
907
908 unsigned ResultReg;
909 if (!emitLoad(VT, ResultReg, Addr))
910 return false;
911 updateValueMap(LI, ResultReg);
912 return true;
913}
914
915bool MipsFastISel::selectStore(const Instruction *I) {
916 const StoreInst *SI = cast<StoreInst>(I);
917
918 Value *Op0 = SI->getOperand(0);
919 unsigned SrcReg = 0;
920
921 // Atomic stores need special handling.
922 if (SI->isAtomic())
923 return false;
924
925 // Verify we have a legal type before going any further.
926 MVT VT;
927 if (!isLoadTypeLegal(SI->getOperand(0)->getType(), VT))
928 return false;
929
930 // Underaligned stores need special handling.
931 if (SI->getAlign() < VT.getFixedSizeInBits() / 8 &&
932 !Subtarget->systemSupportsUnalignedAccess())
933 return false;
934
935 // Get the value to be stored into a register.
936 SrcReg = getRegForValue(Op0);
937 if (SrcReg == 0)
938 return false;
939
940 // See if we can handle this address.
941 Address Addr;
942 if (!computeAddress(SI->getOperand(1), Addr))
943 return false;
944
945 if (!emitStore(VT, SrcReg, Addr))
946 return false;
947 return true;
948}
949
950// This can cause a redundant sltiu to be generated.
951// FIXME: try and eliminate this in a future patch.
952bool MipsFastISel::selectBranch(const Instruction *I) {
953 const BranchInst *BI = cast<BranchInst>(I);
954 MachineBasicBlock *BrBB = FuncInfo.MBB;
955 //
956 // TBB is the basic block for the case where the comparison is true.
957 // FBB is the basic block for the case where the comparison is false.
958 // if (cond) goto TBB
959 // goto FBB
960 // TBB:
961 //
962 MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));
963 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));
964
965 // Fold the common case of a conditional branch with a comparison
966 // in the same block.
967 unsigned ZExtCondReg = 0;
968 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
969 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
970 ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
971 if (!emitCmp(ZExtCondReg, CI))
972 return false;
973 }
974 }
975
976 // For the general case, we need to mask with 1.
977 if (ZExtCondReg == 0) {
978 Register CondReg = getRegForValue(BI->getCondition());
979 if (CondReg == 0)
980 return false;
981
982 ZExtCondReg = emitIntExt(MVT::i1, CondReg, MVT::i32, true);
983 if (ZExtCondReg == 0)
984 return false;
985 }
986
987 BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ))
988 .addReg(ZExtCondReg)
989 .addMBB(TBB);
990 finishCondBranch(BI->getParent(), TBB, FBB);
991 return true;
992}
993
994bool MipsFastISel::selectCmp(const Instruction *I) {
995 const CmpInst *CI = cast<CmpInst>(I);
996 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
997 if (!emitCmp(ResultReg, CI))
998 return false;
999 updateValueMap(I, ResultReg);
1000 return true;
1001}
1002
1003// Attempt to fast-select a floating-point extend instruction.
1004bool MipsFastISel::selectFPExt(const Instruction *I) {
1005 if (UnsupportedFPMode)
1006 return false;
1007 Value *Src = I->getOperand(0);
1008 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1009 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1010
1011 if (SrcVT != MVT::f32 || DestVT != MVT::f64)
1012 return false;
1013
1014 Register SrcReg =
1015 getRegForValue(Src); // this must be a 32bit floating point register class
1016 // maybe we should handle this differently
1017 if (!SrcReg)
1018 return false;
1019
1020 Register DestReg = createResultReg(&Mips::AFGR64RegClass);
1021 emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
1022 updateValueMap(I, DestReg);
1023 return true;
1024}
1025
// Fast-select a select instruction by lowering it to a conditional move
// (MOVN): copy the false value into the result, then overwrite it with the
// true value when the (zero-extended) condition is non-zero.
bool MipsFastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");

  LLVM_DEBUG(dbgs() << "selectSelect\n");

  MVT VT;
  if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
    LLVM_DEBUG(
        dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
    return false;
  }

  // Pick the conditional-move opcode and register class matching the
  // selected value's type.
  unsigned CondMovOpc;
  const TargetRegisterClass *RC;

  if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
    CondMovOpc = Mips::MOVN_I_I;
    RC = &Mips::GPR32RegClass;
  } else if (VT == MVT::f32) {
    CondMovOpc = Mips::MOVN_I_S;
    RC = &Mips::FGR32RegClass;
  } else if (VT == MVT::f64) {
    CondMovOpc = Mips::MOVN_I_D32;
    RC = &Mips::AFGR64RegClass;
  } else
    return false;

  const SelectInst *SI = cast<SelectInst>(I);
  const Value *Cond = SI->getCondition();
  Register Src1Reg = getRegForValue(SI->getTrueValue());
  Register Src2Reg = getRegForValue(SI->getFalseValue());
  Register CondReg = getRegForValue(Cond);

  if (!Src1Reg || !Src2Reg || !CondReg)
    return false;

  // The i1 condition must be zero-extended so MOVN tests a clean 32-bit
  // value.
  Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
  if (!ZExtCondReg)
    return false;

  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
    return false;

  Register ResultReg = createResultReg(RC);
  Register TempReg = createResultReg(RC);

  if (!ResultReg || !TempReg)
    return false;

  // TempReg holds the false value; MOVN replaces it with the true value
  // when the condition is non-zero (TempReg is tied to the result).
  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
  emitInst(CondMovOpc, ResultReg)
    .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
  updateValueMap(I, ResultReg);
  return true;
}
1081
1082// Attempt to fast-select a floating-point truncate instruction.
1083bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1084 if (UnsupportedFPMode)
1085 return false;
1086 Value *Src = I->getOperand(0);
1087 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1088 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1089
1090 if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1091 return false;
1092
1093 Register SrcReg = getRegForValue(Src);
1094 if (!SrcReg)
1095 return false;
1096
1097 Register DestReg = createResultReg(&Mips::FGR32RegClass);
1098 if (!DestReg)
1099 return false;
1100
1101 emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1102 updateValueMap(I, DestReg);
1103 return true;
1104}
1105
1106// Attempt to fast-select a floating-point-to-integer conversion.
1107bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1108 if (UnsupportedFPMode)
1109 return false;
1110 MVT DstVT, SrcVT;
1111 if (!IsSigned)
1112 return false; // We don't handle this case yet. There is no native
1113 // instruction for this but it can be synthesized.
1114 Type *DstTy = I->getType();
1115 if (!isTypeLegal(DstTy, DstVT))
1116 return false;
1117
1118 if (DstVT != MVT::i32)
1119 return false;
1120
1121 Value *Src = I->getOperand(0);
1122 Type *SrcTy = Src->getType();
1123 if (!isTypeLegal(SrcTy, SrcVT))
1124 return false;
1125
1126 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1127 return false;
1128
1129 Register SrcReg = getRegForValue(Src);
1130 if (SrcReg == 0)
1131 return false;
1132
1133 // Determine the opcode for the conversion, which takes place
1134 // entirely within FPRs.
1135 Register DestReg = createResultReg(&Mips::GPR32RegClass);
1136 Register TempReg = createResultReg(&Mips::FGR32RegClass);
1137 unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1138
1139 // Generate the convert.
1140 emitInst(Opc, TempReg).addReg(SrcReg);
1141 emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1142
1143 updateValueMap(I, DestReg);
1144 return true;
1145}
1146
1147bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
1148 SmallVectorImpl<MVT> &OutVTs,
1149 unsigned &NumBytes) {
1150 CallingConv::ID CC = CLI.CallConv;
1153 for (const ArgListEntry &Arg : CLI.Args)
1154 ArgTys.push_back(Arg.Val->getType());
1155 CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
1156 CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, ArgTys,
1157 CCAssignFnForCall(CC));
1158 // Get a count of how many bytes are to be pushed on the stack.
1159 NumBytes = CCInfo.getStackSize();
1160 // This is the minimum argument area used for A0-A3.
1161 if (NumBytes < 16)
1162 NumBytes = 16;
1163
1164 emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);
1165 // Process the args.
1166 MVT firstMVT;
1167 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1168 CCValAssign &VA = ArgLocs[i];
1169 const Value *ArgVal = CLI.OutVals[VA.getValNo()];
1170 MVT ArgVT = OutVTs[VA.getValNo()];
1171
1172 if (i == 0) {
1173 firstMVT = ArgVT;
1174 if (ArgVT == MVT::f32) {
1175 VA.convertToReg(Mips::F12);
1176 } else if (ArgVT == MVT::f64) {
1177 if (Subtarget->isFP64bit())
1178 VA.convertToReg(Mips::D6_64);
1179 else
1180 VA.convertToReg(Mips::D6);
1181 }
1182 } else if (i == 1) {
1183 if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
1184 if (ArgVT == MVT::f32) {
1185 VA.convertToReg(Mips::F14);
1186 } else if (ArgVT == MVT::f64) {
1187 if (Subtarget->isFP64bit())
1188 VA.convertToReg(Mips::D7_64);
1189 else
1190 VA.convertToReg(Mips::D7);
1191 }
1192 }
1193 }
1194 if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
1195 (ArgVT == MVT::i8)) &&
1196 VA.isMemLoc()) {
1197 switch (VA.getLocMemOffset()) {
1198 case 0:
1199 VA.convertToReg(Mips::A0);
1200 break;
1201 case 4:
1202 VA.convertToReg(Mips::A1);
1203 break;
1204 case 8:
1205 VA.convertToReg(Mips::A2);
1206 break;
1207 case 12:
1208 VA.convertToReg(Mips::A3);
1209 break;
1210 default:
1211 break;
1212 }
1213 }
1214 Register ArgReg = getRegForValue(ArgVal);
1215 if (!ArgReg)
1216 return false;
1217
1218 // Handle arg promotion: SExt, ZExt, AExt.
1219 switch (VA.getLocInfo()) {
1220 case CCValAssign::Full:
1221 break;
1222 case CCValAssign::AExt:
1223 case CCValAssign::SExt: {
1224 MVT DestVT = VA.getLocVT();
1225 MVT SrcVT = ArgVT;
1226 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
1227 if (!ArgReg)
1228 return false;
1229 break;
1230 }
1231 case CCValAssign::ZExt: {
1232 MVT DestVT = VA.getLocVT();
1233 MVT SrcVT = ArgVT;
1234 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
1235 if (!ArgReg)
1236 return false;
1237 break;
1238 }
1239 default:
1240 llvm_unreachable("Unknown arg promotion!");
1241 }
1242
1243 // Now copy/store arg to correct locations.
1244 if (VA.isRegLoc() && !VA.needsCustom()) {
1245 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1246 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
1247 CLI.OutRegs.push_back(VA.getLocReg());
1248 } else if (VA.needsCustom()) {
1249 llvm_unreachable("Mips does not use custom args.");
1250 return false;
1251 } else {
1252 //
1253 // FIXME: This path will currently return false. It was copied
1254 // from the AArch64 port and should be essentially fine for Mips too.
1255 // The work to finish up this path will be done in a follow-on patch.
1256 //
1257 assert(VA.isMemLoc() && "Assuming store on stack.");
1258 // Don't emit stores for undef values.
1259 if (isa<UndefValue>(ArgVal))
1260 continue;
1261
1262 // Need to store on the stack.
1263 // FIXME: This alignment is incorrect but this path is disabled
1264 // for now (will return false). We need to determine the right alignment
1265 // based on the normal alignment for the underlying machine type.
1266 //
1267 unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
1268
1269 unsigned BEAlign = 0;
1270 if (ArgSize < 8 && !Subtarget->isLittle())
1271 BEAlign = 8 - ArgSize;
1272
1273 Address Addr;
1274 Addr.setKind(Address::RegBase);
1275 Addr.setReg(Mips::SP);
1276 Addr.setOffset(VA.getLocMemOffset() + BEAlign);
1277
1278 Align Alignment = DL.getABITypeAlign(ArgVal->getType());
1279 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
1280 MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
1281 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
1282 (void)(MMO);
1283 // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
1284 return false; // can't store on the stack yet.
1285 }
1286 }
1287
1288 return true;
1289}
1290
1291bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
1292 unsigned NumBytes) {
1293 CallingConv::ID CC = CLI.CallConv;
1294 emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
1295 if (RetVT != MVT::isVoid) {
1297 MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
1298
1299 CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips);
1300
1301 // Only handle a single return value.
1302 if (RVLocs.size() != 1)
1303 return false;
1304 // Copy all of the result registers out of their specified physreg.
1305 MVT CopyVT = RVLocs[0].getValVT();
1306 // Special handling for extended integers.
1307 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
1308 CopyVT = MVT::i32;
1309
1310 Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
1311 if (!ResultReg)
1312 return false;
1313 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1314 TII.get(TargetOpcode::COPY),
1315 ResultReg).addReg(RVLocs[0].getLocReg());
1316 CLI.InRegs.push_back(RVLocs[0].getLocReg());
1317
1318 CLI.ResultReg = ResultReg;
1319 CLI.NumResultRegs = 1;
1320 }
1321 return true;
1322}
1323
1324bool MipsFastISel::fastLowerArguments() {
1325 LLVM_DEBUG(dbgs() << "fastLowerArguments\n");
1326
1327 if (!FuncInfo.CanLowerReturn) {
1328 LLVM_DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
1329 return false;
1330 }
1331
1332 const Function *F = FuncInfo.Fn;
1333 if (F->isVarArg()) {
1334 LLVM_DEBUG(dbgs() << ".. gave up (varargs)\n");
1335 return false;
1336 }
1337
1338 CallingConv::ID CC = F->getCallingConv();
1339 if (CC != CallingConv::C) {
1340 LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
1341 return false;
1342 }
1343
1344 std::array<MCPhysReg, 4> GPR32ArgRegs = {{Mips::A0, Mips::A1, Mips::A2,
1345 Mips::A3}};
1346 std::array<MCPhysReg, 2> FGR32ArgRegs = {{Mips::F12, Mips::F14}};
1347 std::array<MCPhysReg, 2> AFGR64ArgRegs = {{Mips::D6, Mips::D7}};
1348 auto NextGPR32 = GPR32ArgRegs.begin();
1349 auto NextFGR32 = FGR32ArgRegs.begin();
1350 auto NextAFGR64 = AFGR64ArgRegs.begin();
1351
1352 struct AllocatedReg {
1353 const TargetRegisterClass *RC;
1354 unsigned Reg;
1355 AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
1356 : RC(RC), Reg(Reg) {}
1357 };
1358
1359 // Only handle simple cases. i.e. All arguments are directly mapped to
1360 // registers of the appropriate type.
1362 for (const auto &FormalArg : F->args()) {
1363 if (FormalArg.hasAttribute(Attribute::InReg) ||
1364 FormalArg.hasAttribute(Attribute::StructRet) ||
1365 FormalArg.hasAttribute(Attribute::ByVal)) {
1366 LLVM_DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
1367 return false;
1368 }
1369
1370 Type *ArgTy = FormalArg.getType();
1371 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
1372 LLVM_DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
1373 return false;
1374 }
1375
1376 EVT ArgVT = TLI.getValueType(DL, ArgTy);
1377 LLVM_DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
1378 << ArgVT << "\n");
1379 if (!ArgVT.isSimple()) {
1380 LLVM_DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
1381 return false;
1382 }
1383
1384 switch (ArgVT.getSimpleVT().SimpleTy) {
1385 case MVT::i1:
1386 case MVT::i8:
1387 case MVT::i16:
1388 if (!FormalArg.hasAttribute(Attribute::SExt) &&
1389 !FormalArg.hasAttribute(Attribute::ZExt)) {
1390 // It must be any extend, this shouldn't happen for clang-generated IR
1391 // so just fall back on SelectionDAG.
1392 LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
1393 return false;
1394 }
1395
1396 if (NextGPR32 == GPR32ArgRegs.end()) {
1397 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1398 return false;
1399 }
1400
1401 LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1402 Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1403
1404 // Allocating any GPR32 prohibits further use of floating point arguments.
1405 NextFGR32 = FGR32ArgRegs.end();
1406 NextAFGR64 = AFGR64ArgRegs.end();
1407 break;
1408
1409 case MVT::i32:
1410 if (FormalArg.hasAttribute(Attribute::ZExt)) {
1411 // The O32 ABI does not permit a zero-extended i32.
1412 LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
1413 return false;
1414 }
1415
1416 if (NextGPR32 == GPR32ArgRegs.end()) {
1417 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1418 return false;
1419 }
1420
1421 LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1422 Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1423
1424 // Allocating any GPR32 prohibits further use of floating point arguments.
1425 NextFGR32 = FGR32ArgRegs.end();
1426 NextAFGR64 = AFGR64ArgRegs.end();
1427 break;
1428
1429 case MVT::f32:
1430 if (UnsupportedFPMode) {
1431 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1432 return false;
1433 }
1434 if (NextFGR32 == FGR32ArgRegs.end()) {
1435 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
1436 return false;
1437 }
1438 LLVM_DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
1439 Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
1440 // Allocating an FGR32 also allocates the super-register AFGR64, and
1441 // ABI rules require us to skip the corresponding GPR32.
1442 if (NextGPR32 != GPR32ArgRegs.end())
1443 NextGPR32++;
1444 if (NextAFGR64 != AFGR64ArgRegs.end())
1445 NextAFGR64++;
1446 break;
1447
1448 case MVT::f64:
1449 if (UnsupportedFPMode) {
1450 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1451 return false;
1452 }
1453 if (NextAFGR64 == AFGR64ArgRegs.end()) {
1454 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
1455 return false;
1456 }
1457 LLVM_DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
1458 Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
1459 // Allocating an FGR32 also allocates the super-register AFGR64, and
1460 // ABI rules require us to skip the corresponding GPR32 pair.
1461 if (NextGPR32 != GPR32ArgRegs.end())
1462 NextGPR32++;
1463 if (NextGPR32 != GPR32ArgRegs.end())
1464 NextGPR32++;
1465 if (NextFGR32 != FGR32ArgRegs.end())
1466 NextFGR32++;
1467 break;
1468
1469 default:
1470 LLVM_DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
1471 return false;
1472 }
1473 }
1474
1475 for (const auto &FormalArg : F->args()) {
1476 unsigned ArgNo = FormalArg.getArgNo();
1477 unsigned SrcReg = Allocation[ArgNo].Reg;
1478 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
1479 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
1480 // Without this, EmitLiveInCopies may eliminate the livein if its only
1481 // use is a bitcast (which isn't turned into an instruction).
1482 Register ResultReg = createResultReg(Allocation[ArgNo].RC);
1483 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1484 TII.get(TargetOpcode::COPY), ResultReg)
1485 .addReg(DstReg, getKillRegState(true));
1486 updateValueMap(&FormalArg, ResultReg);
1487 }
1488
1489 // Calculate the size of the incoming arguments area.
1490 // We currently reject all the cases where this would be non-zero.
1491 unsigned IncomingArgSizeInBytes = 0;
1492
1493 // Account for the reserved argument area on ABI's that have one (O32).
1494 // It seems strange to do this on the caller side but it's necessary in
1495 // SelectionDAG's implementation.
1496 IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
1497 IncomingArgSizeInBytes);
1498
1499 MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
1500 false);
1501
1502 return true;
1503}
1504
1505bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
1506 CallingConv::ID CC = CLI.CallConv;
1507 bool IsTailCall = CLI.IsTailCall;
1508 bool IsVarArg = CLI.IsVarArg;
1509 const Value *Callee = CLI.Callee;
1510 MCSymbol *Symbol = CLI.Symbol;
1511
1512 // Do not handle FastCC.
1513 if (CC == CallingConv::Fast)
1514 return false;
1515
1516 // Allow SelectionDAG isel to handle tail calls.
1517 if (IsTailCall)
1518 return false;
1519
1520 // Let SDISel handle vararg functions.
1521 if (IsVarArg)
1522 return false;
1523
1524 // FIXME: Only handle *simple* calls for now.
1525 MVT RetVT;
1526 if (CLI.RetTy->isVoidTy())
1527 RetVT = MVT::isVoid;
1528 else if (!isTypeSupported(CLI.RetTy, RetVT))
1529 return false;
1530
1531 for (auto Flag : CLI.OutFlags)
1532 if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
1533 return false;
1534
1535 // Set up the argument vectors.
1536 SmallVector<MVT, 16> OutVTs;
1537 OutVTs.reserve(CLI.OutVals.size());
1538
1539 for (auto *Val : CLI.OutVals) {
1540 MVT VT;
1541 if (!isTypeLegal(Val->getType(), VT) &&
1542 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
1543 return false;
1544
1545 // We don't handle vector parameters yet.
1546 if (VT.isVector() || VT.getSizeInBits() > 64)
1547 return false;
1548
1549 OutVTs.push_back(VT);
1550 }
1551
1552 Address Addr;
1553 if (!computeCallAddress(Callee, Addr))
1554 return false;
1555
1556 // Handle the arguments now that we've gotten them.
1557 unsigned NumBytes;
1558 if (!processCallArgs(CLI, OutVTs, NumBytes))
1559 return false;
1560
1561 if (!Addr.getGlobalValue())
1562 return false;
1563
1564 // Issue the call.
1565 unsigned DestAddress;
1566 if (Symbol)
1567 DestAddress = materializeExternalCallSym(Symbol);
1568 else
1569 DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
1570 emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
1571 MachineInstrBuilder MIB =
1572 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR),
1573 Mips::RA).addReg(Mips::T9);
1574
1575 // Add implicit physical register uses to the call.
1576 for (auto Reg : CLI.OutRegs)
1578
1579 // Add a register mask with the call-preserved registers.
1580 // Proper defs for return values will be added by setPhysRegsDeadExcept().
1581 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
1582
1583 CLI.Call = MIB;
1584
1585 if (EmitJalrReloc && !Subtarget->inMips16Mode()) {
1586 // Attach callee address to the instruction, let asm printer emit
1587 // .reloc R_MIPS_JALR.
1588 if (Symbol)
1589 MIB.addSym(Symbol, MipsII::MO_JALR);
1590 else
1591 MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(
1592 Addr.getGlobalValue()->getName()), MipsII::MO_JALR);
1593 }
1594
1595 // Finish off the call including any return values.
1596 return finishCall(CLI, RetVT, NumBytes);
1597}
1598
// Fast-lower the few intrinsics FastISel supports on MIPS: bswap (inline
// expansion) and memcpy/memmove/memset (lowered to libcalls). Returns
// false to punt anything else to SelectionDAG.
bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::bswap: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    MVT VT;
    if (!isTypeSupported(RetTy, VT))
      return false;

    Register SrcReg = getRegForValue(II->getOperand(0));
    if (SrcReg == 0)
      return false;
    Register DestReg = createResultReg(&Mips::GPR32RegClass);
    if (DestReg == 0)
      return false;
    if (VT == MVT::i16) {
      if (Subtarget->hasMips32r2()) {
        // MIPS32r2 has a single-instruction halfword byte swap.
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // Pre-r2 expansion: (src << 8) | ((src >> 8) & 0xFF).
        unsigned TempReg[3];
        for (unsigned &R : TempReg) {
          R = createResultReg(&Mips::GPR32RegClass);
          if (R == 0)
            return false;
        }
        emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[1]).addImm(0xFF);
        emitInst(Mips::OR, DestReg).addReg(TempReg[0]).addReg(TempReg[2]);
        updateValueMap(II, DestReg);
        return true;
      }
    } else if (VT == MVT::i32) {
      if (Subtarget->hasMips32r2()) {
        // MIPS32r2: swap bytes within halfwords, then rotate by 16.
        Register TempReg = createResultReg(&Mips::GPR32RegClass);
        emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
        emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // Pre-r2 expansion: assemble the four bytes individually with
        // shifts, masks and ORs.
        unsigned TempReg[8];
        for (unsigned &R : TempReg) {
          R = createResultReg(&Mips::GPR32RegClass);
          if (R == 0)
            return false;
        }

        emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
        emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);

        emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
        emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);

        emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
        emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
        emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
        updateValueMap(II, DestReg);
        return true;
      }
    }
    return false;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);
    // Don't handle volatile.
    if (MTI->isVolatile())
      return false;
    if (!MTI->getLength()->getType()->isIntegerTy(32))
      return false;
    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    // Drop the trailing isvolatile argument when forming the libcall.
    return lowerCallTo(II, IntrMemName, II->arg_size() - 1);
  }
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);
    // Don't handle volatile.
    if (MSI->isVolatile())
      return false;
    if (!MSI->getLength()->getType()->isIntegerTy(32))
      return false;
    // Drop the trailing isvolatile argument when forming the libcall.
    return lowerCallTo(II, "memset", II->arg_size() - 1);
  }
  }
  return false;
}
1690
1691bool MipsFastISel::selectRet(const Instruction *I) {
1692 const Function &F = *I->getParent()->getParent();
1693 const ReturnInst *Ret = cast<ReturnInst>(I);
1694
1695 LLVM_DEBUG(dbgs() << "selectRet\n");
1696
1697 if (!FuncInfo.CanLowerReturn)
1698 return false;
1699
1700 // Build a list of return value registers.
1701 SmallVector<unsigned, 4> RetRegs;
1702
1703 if (Ret->getNumOperands() > 0) {
1704 CallingConv::ID CC = F.getCallingConv();
1705
1706 // Do not handle FastCC.
1707 if (CC == CallingConv::Fast)
1708 return false;
1709
1711 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1712
1713 // Analyze operands of the call, assigning locations to each operand.
1715 MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
1716 I->getContext());
1717 CCAssignFn *RetCC = RetCC_Mips;
1718 CCInfo.AnalyzeReturn(Outs, RetCC);
1719
1720 // Only handle a single return value for now.
1721 if (ValLocs.size() != 1)
1722 return false;
1723
1724 CCValAssign &VA = ValLocs[0];
1725 const Value *RV = Ret->getOperand(0);
1726
1727 // Don't bother handling odd stuff for now.
1728 if ((VA.getLocInfo() != CCValAssign::Full) &&
1729 (VA.getLocInfo() != CCValAssign::BCvt))
1730 return false;
1731
1732 // Only handle register returns for now.
1733 if (!VA.isRegLoc())
1734 return false;
1735
1736 Register Reg = getRegForValue(RV);
1737 if (Reg == 0)
1738 return false;
1739
1740 unsigned SrcReg = Reg + VA.getValNo();
1741 Register DestReg = VA.getLocReg();
1742 // Avoid a cross-class copy. This is very unlikely.
1743 if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1744 return false;
1745
1746 EVT RVEVT = TLI.getValueType(DL, RV->getType());
1747 if (!RVEVT.isSimple())
1748 return false;
1749
1750 if (RVEVT.isVector())
1751 return false;
1752
1753 MVT RVVT = RVEVT.getSimpleVT();
1754 if (RVVT == MVT::f128)
1755 return false;
1756
1757 // Do not handle FGR64 returns for now.
1758 if (RVVT == MVT::f64 && UnsupportedFPMode) {
1759 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
1760 return false;
1761 }
1762
1763 MVT DestVT = VA.getValVT();
1764 // Special handling for extended integers.
1765 if (RVVT != DestVT) {
1766 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1767 return false;
1768
1769 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
1770 bool IsZExt = Outs[0].Flags.isZExt();
1771 SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
1772 if (SrcReg == 0)
1773 return false;
1774 }
1775 }
1776
1777 // Make the copy.
1778 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1779 TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
1780
1781 // Add register to return instruction.
1782 RetRegs.push_back(VA.getLocReg());
1783 }
1784 MachineInstrBuilder MIB = emitInst(Mips::RetRA);
1785 for (unsigned Reg : RetRegs)
1787 return true;
1788}
1789
1790bool MipsFastISel::selectTrunc(const Instruction *I) {
1791 // The high bits for a type smaller than the register size are assumed to be
1792 // undefined.
1793 Value *Op = I->getOperand(0);
1794
1795 EVT SrcVT, DestVT;
1796 SrcVT = TLI.getValueType(DL, Op->getType(), true);
1797 DestVT = TLI.getValueType(DL, I->getType(), true);
1798
1799 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1800 return false;
1801 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1802 return false;
1803
1804 Register SrcReg = getRegForValue(Op);
1805 if (!SrcReg)
1806 return false;
1807
1808 // Because the high bits are undefined, a truncate doesn't generate
1809 // any code.
1810 updateValueMap(I, SrcReg);
1811 return true;
1812}
1813
1814bool MipsFastISel::selectIntExt(const Instruction *I) {
1815 Type *DestTy = I->getType();
1816 Value *Src = I->getOperand(0);
1817 Type *SrcTy = Src->getType();
1818
1819 bool isZExt = isa<ZExtInst>(I);
1820 Register SrcReg = getRegForValue(Src);
1821 if (!SrcReg)
1822 return false;
1823
1824 EVT SrcEVT, DestEVT;
1825 SrcEVT = TLI.getValueType(DL, SrcTy, true);
1826 DestEVT = TLI.getValueType(DL, DestTy, true);
1827 if (!SrcEVT.isSimple())
1828 return false;
1829 if (!DestEVT.isSimple())
1830 return false;
1831
1832 MVT SrcVT = SrcEVT.getSimpleVT();
1833 MVT DestVT = DestEVT.getSimpleVT();
1834 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1835
1836 if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1837 return false;
1838 updateValueMap(I, ResultReg);
1839 return true;
1840}
1841
1842bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1843 unsigned DestReg) {
1844 unsigned ShiftAmt;
1845 switch (SrcVT.SimpleTy) {
1846 default:
1847 return false;
1848 case MVT::i8:
1849 ShiftAmt = 24;
1850 break;
1851 case MVT::i16:
1852 ShiftAmt = 16;
1853 break;
1854 }
1855 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1856 emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1857 emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1858 return true;
1859}
1860
1861bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1862 unsigned DestReg) {
1863 switch (SrcVT.SimpleTy) {
1864 default:
1865 return false;
1866 case MVT::i8:
1867 emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1868 break;
1869 case MVT::i16:
1870 emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1871 break;
1872 }
1873 return true;
1874}
1875
1876bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1877 unsigned DestReg) {
1878 if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1879 return false;
1880 if (Subtarget->hasMips32r2())
1881 return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1882 return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1883}
1884
1885bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1886 unsigned DestReg) {
1887 int64_t Imm;
1888
1889 switch (SrcVT.SimpleTy) {
1890 default:
1891 return false;
1892 case MVT::i1:
1893 Imm = 1;
1894 break;
1895 case MVT::i8:
1896 Imm = 0xff;
1897 break;
1898 case MVT::i16:
1899 Imm = 0xffff;
1900 break;
1901 }
1902
1903 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1904 return true;
1905}
1906
1907bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1908 unsigned DestReg, bool IsZExt) {
1909 // FastISel does not have plumbing to deal with extensions where the SrcVT or
1910 // DestVT are odd things, so test to make sure that they are both types we can
1911 // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
1912 // bail out to SelectionDAG.
1913 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1914 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1915 return false;
1916 if (IsZExt)
1917 return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1918 return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1919}
1920
1921unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1922 bool isZExt) {
1923 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1924 bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1925 return Success ? DestReg : 0;
1926}
1927
1928bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1929 EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1930 if (!DestEVT.isSimple())
1931 return false;
1932
1933 MVT DestVT = DestEVT.getSimpleVT();
1934 if (DestVT != MVT::i32)
1935 return false;
1936
1937 unsigned DivOpc;
1938 switch (ISDOpcode) {
1939 default:
1940 return false;
1941 case ISD::SDIV:
1942 case ISD::SREM:
1943 DivOpc = Mips::SDIV;
1944 break;
1945 case ISD::UDIV:
1946 case ISD::UREM:
1947 DivOpc = Mips::UDIV;
1948 break;
1949 }
1950
1951 Register Src0Reg = getRegForValue(I->getOperand(0));
1952 Register Src1Reg = getRegForValue(I->getOperand(1));
1953 if (!Src0Reg || !Src1Reg)
1954 return false;
1955
1956 emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
1957 if (!isa<ConstantInt>(I->getOperand(1)) ||
1958 dyn_cast<ConstantInt>(I->getOperand(1))->isZero()) {
1959 emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1960 }
1961
1962 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1963 if (!ResultReg)
1964 return false;
1965
1966 unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1967 ? Mips::MFHI
1968 : Mips::MFLO;
1969 emitInst(MFOpc, ResultReg);
1970
1971 updateValueMap(I, ResultReg);
1972 return true;
1973}
1974
1975bool MipsFastISel::selectShift(const Instruction *I) {
1976 MVT RetVT;
1977
1978 if (!isTypeSupported(I->getType(), RetVT))
1979 return false;
1980
1981 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1982 if (!ResultReg)
1983 return false;
1984
1985 unsigned Opcode = I->getOpcode();
1986 const Value *Op0 = I->getOperand(0);
1987 Register Op0Reg = getRegForValue(Op0);
1988 if (!Op0Reg)
1989 return false;
1990
1991 // If AShr or LShr, then we need to make sure the operand0 is sign extended.
1992 if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1993 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1994 if (!TempReg)
1995 return false;
1996
1997 MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1998 bool IsZExt = Opcode == Instruction::LShr;
1999 if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
2000 return false;
2001
2002 Op0Reg = TempReg;
2003 }
2004
2005 if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
2006 uint64_t ShiftVal = C->getZExtValue();
2007
2008 switch (Opcode) {
2009 default:
2010 llvm_unreachable("Unexpected instruction.");
2011 case Instruction::Shl:
2012 Opcode = Mips::SLL;
2013 break;
2014 case Instruction::AShr:
2015 Opcode = Mips::SRA;
2016 break;
2017 case Instruction::LShr:
2018 Opcode = Mips::SRL;
2019 break;
2020 }
2021
2022 emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
2023 updateValueMap(I, ResultReg);
2024 return true;
2025 }
2026
2027 Register Op1Reg = getRegForValue(I->getOperand(1));
2028 if (!Op1Reg)
2029 return false;
2030
2031 switch (Opcode) {
2032 default:
2033 llvm_unreachable("Unexpected instruction.");
2034 case Instruction::Shl:
2035 Opcode = Mips::SLLV;
2036 break;
2037 case Instruction::AShr:
2038 Opcode = Mips::SRAV;
2039 break;
2040 case Instruction::LShr:
2041 Opcode = Mips::SRLV;
2042 break;
2043 }
2044
2045 emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
2046 updateValueMap(I, ResultReg);
2047 return true;
2048}
2049
2050bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
2051 switch (I->getOpcode()) {
2052 default:
2053 break;
2054 case Instruction::Load:
2055 return selectLoad(I);
2056 case Instruction::Store:
2057 return selectStore(I);
2058 case Instruction::SDiv:
2059 if (!selectBinaryOp(I, ISD::SDIV))
2060 return selectDivRem(I, ISD::SDIV);
2061 return true;
2062 case Instruction::UDiv:
2063 if (!selectBinaryOp(I, ISD::UDIV))
2064 return selectDivRem(I, ISD::UDIV);
2065 return true;
2066 case Instruction::SRem:
2067 if (!selectBinaryOp(I, ISD::SREM))
2068 return selectDivRem(I, ISD::SREM);
2069 return true;
2070 case Instruction::URem:
2071 if (!selectBinaryOp(I, ISD::UREM))
2072 return selectDivRem(I, ISD::UREM);
2073 return true;
2074 case Instruction::Shl:
2075 case Instruction::LShr:
2076 case Instruction::AShr:
2077 return selectShift(I);
2078 case Instruction::And:
2079 case Instruction::Or:
2080 case Instruction::Xor:
2081 return selectLogicalOp(I);
2082 case Instruction::Br:
2083 return selectBranch(I);
2084 case Instruction::Ret:
2085 return selectRet(I);
2086 case Instruction::Trunc:
2087 return selectTrunc(I);
2088 case Instruction::ZExt:
2089 case Instruction::SExt:
2090 return selectIntExt(I);
2091 case Instruction::FPTrunc:
2092 return selectFPTrunc(I);
2093 case Instruction::FPExt:
2094 return selectFPExt(I);
2095 case Instruction::FPToSI:
2096 return selectFPToInt(I, /*isSigned*/ true);
2097 case Instruction::FPToUI:
2098 return selectFPToInt(I, /*isSigned*/ false);
2099 case Instruction::ICmp:
2100 case Instruction::FCmp:
2101 return selectCmp(I);
2102 case Instruction::Select:
2103 return selectSelect(I);
2104 }
2105 return false;
2106}
2107
2108unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2109 bool IsUnsigned) {
2110 Register VReg = getRegForValue(V);
2111 if (VReg == 0)
2112 return 0;
2113 MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2114
2115 if (VMVT == MVT::i1)
2116 return 0;
2117
2118 if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2119 Register TempReg = createResultReg(&Mips::GPR32RegClass);
2120 if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2121 return 0;
2122 VReg = TempReg;
2123 }
2124 return VReg;
2125}
2126
2127void MipsFastISel::simplifyAddress(Address &Addr) {
2128 if (!isInt<16>(Addr.getOffset())) {
2129 unsigned TempReg =
2130 materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2131 Register DestReg = createResultReg(&Mips::GPR32RegClass);
2132 emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2133 Addr.setReg(DestReg);
2134 Addr.setOffset(0);
2135 }
2136}
2137
2138unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2139 const TargetRegisterClass *RC,
2140 unsigned Op0, unsigned Op1) {
2141 // We treat the MUL instruction in a special way because it clobbers
2142 // the HI0 & LO0 registers. The TableGen definition of this instruction can
2143 // mark these registers only as implicitly defined. As a result, the
2144 // register allocator runs out of registers when this instruction is
2145 // followed by another instruction that defines the same registers too.
2146 // We can fix this by explicitly marking those registers as dead.
2147 if (MachineInstOpcode == Mips::MUL) {
2148 Register ResultReg = createResultReg(RC);
2149 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2150 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2151 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2152 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2153 .addReg(Op0)
2154 .addReg(Op1)
2157 return ResultReg;
2158 }
2159
2160 return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1);
2161}
2162
2163namespace llvm {
2164
2166 const TargetLibraryInfo *libInfo,
2167 const LibcallLoweringInfo *libcallLowering) {
2168 return new MipsFastISel(funcInfo, libInfo, libcallLowering);
2169}
2170
2171} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
#define Success
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
Select the AArch64 opcode for the basic binary operation GenericOpc (such as G_OR or G_SDIV),...
static void emitLoad(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos, const TargetInstrInfo &TII, unsigned Reg1, unsigned Reg2, int Offset, bool IsPostDec)
Emit a load-pair instruction for frame-destroy.
static void emitStore(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos, const TargetInstrInfo &TII, unsigned Reg1, unsigned Reg2, int Offset, bool IsPreDec)
Emit a store-pair instruction for frame-setup.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the FastISel class.
const HexagonInstrInfo * TII
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
cl::opt< bool > EmitJalrReloc
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State, ArrayRef< MCPhysReg > F64Regs)
uint64_t IntrinsicInst * II
#define P(N)
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1416
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1549
an instruction to allocate memory on the stack
PointerType * getType() const
Overload to return most specific pointer type.
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
CCState - This class holds information needed while lowering arguments and return values.
void convertToReg(MCRegister Reg)
Register getLocReg() const
LocInfo getLocInfo() const
bool needsCustom() const
int64_t getLocMemOffset() const
unsigned getValNo() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isUnsigned() const
Definition InstrTypes.h:936
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
This is an important base class in LLVM.
Definition Constant.h:43
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, Register Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
bool hasLocalLinkage() const
bool hasInternalLinkage() const
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
Align getAlign() const
Return the alignment of the access that is being performed.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Machine Value Type.
SimpleValueType SimpleTy
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Value * getLength() const
bool isVolatile() const
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
Register getGlobalBaseReg(MachineFunction &MF)
bool isFP64bit() const
bool useSoftFloat() const
const MipsInstrInfo * getInstrInfo() const override
bool inMips16Mode() const
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
bool hasMips32r2() const
const MipsTargetLowering * getTargetLowering() const override
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
TypeSize getElementOffset(unsigned Idx) const
Definition DataLayout.h:754
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
const Use * const_op_iterator
Definition User.h:255
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:738
Flag
These should be considered private to the implementation of the MCInstrDesc class.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Dead
Unused definition.
@ Define
Register definition.
@ User
could "use" a pointer
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition Utils.cpp:56
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.