MipsFastISel.cpp (LLVM 4.0.0)
1 //===-- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// \file
11 /// \brief This file defines the MIPS-specific support for the FastISel class.
12 /// Some of the target-specific code is generated by tablegen in the file
13 /// MipsGenFastISel.inc, which is #included here.
14 ///
15 //===----------------------------------------------------------------------===//
16 
17 #include "MipsCCState.h"
18 #include "MipsInstrInfo.h"
19 #include "MipsISelLowering.h"
20 #include "MipsMachineFunction.h"
21 #include "MipsRegisterInfo.h"
22 #include "MipsSubtarget.h"
23 #include "MipsTargetMachine.h"
25 #include "llvm/CodeGen/FastISel.h"
30 #include "llvm/IR/GlobalAlias.h"
31 #include "llvm/IR/GlobalVariable.h"
32 #include "llvm/MC/MCSymbol.h"
34 #include "llvm/Support/Debug.h"
35 
36 #define DEBUG_TYPE "mips-fastisel"
37 
38 using namespace llvm;
39 
40 namespace {
41 
42 class MipsFastISel final : public FastISel {
43 
44  // All possible address modes.
45  class Address {
46  public:
47  typedef enum { RegBase, FrameIndexBase } BaseKind;
48 
49  private:
50  BaseKind Kind;
51  union {
52  unsigned Reg;
53  int FI;
54  } Base;
55 
56  int64_t Offset;
57 
58  const GlobalValue *GV;
59 
60  public:
61  // Innocuous defaults for our address.
62  Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; }
63  void setKind(BaseKind K) { Kind = K; }
64  BaseKind getKind() const { return Kind; }
65  bool isRegBase() const { return Kind == RegBase; }
66  bool isFIBase() const { return Kind == FrameIndexBase; }
67  void setReg(unsigned Reg) {
68  assert(isRegBase() && "Invalid base register access!");
69  Base.Reg = Reg;
70  }
71  unsigned getReg() const {
72  assert(isRegBase() && "Invalid base register access!");
73  return Base.Reg;
74  }
75  void setFI(unsigned FI) {
76  assert(isFIBase() && "Invalid base frame index access!");
77  Base.FI = FI;
78  }
79  unsigned getFI() const {
80  assert(isFIBase() && "Invalid base frame index access!");
81  return Base.FI;
82  }
83 
84  void setOffset(int64_t Offset_) { Offset = Offset_; }
85  int64_t getOffset() const { return Offset; }
86  void setGlobalValue(const GlobalValue *G) { GV = G; }
87  const GlobalValue *getGlobalValue() { return GV; }
88  };
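// Editorial note (not part of the original source): Address is just a small
// record holding one of two base kinds plus an offset; a typical use is
//   Address Addr;
//   if (computeAddress(I->getOperand(0), Addr)) // fills Reg/FI and Offset
//     emitLoad(VT, ResultReg, Addr, Alignment); // consumed by emitLoad/emitStore
// The GlobalValue member is only filled in by computeCallAddress for call targets.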
89 
90  /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
91  /// make the right decision when generating code for different targets.
92  const TargetMachine &TM;
93  const MipsSubtarget *Subtarget;
94  const TargetInstrInfo &TII;
95  const TargetLowering &TLI;
96  MipsFunctionInfo *MFI;
97 
98  // Convenience variables to avoid some queries.
 99  LLVMContext *Context;
 100 
101  bool fastLowerArguments() override;
102  bool fastLowerCall(CallLoweringInfo &CLI) override;
103  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
104 
 105  bool UnsupportedFPMode; // When set, allow fast-isel to proceed but simply
 106  // not handle floating point, rather than
 107  // rejecting fast-isel in those situations.
108 
109 private:
110  // Selection routines.
111  bool selectLogicalOp(const Instruction *I);
112  bool selectLoad(const Instruction *I);
113  bool selectStore(const Instruction *I);
114  bool selectBranch(const Instruction *I);
115  bool selectSelect(const Instruction *I);
116  bool selectCmp(const Instruction *I);
117  bool selectFPExt(const Instruction *I);
118  bool selectFPTrunc(const Instruction *I);
119  bool selectFPToInt(const Instruction *I, bool IsSigned);
120  bool selectRet(const Instruction *I);
121  bool selectTrunc(const Instruction *I);
122  bool selectIntExt(const Instruction *I);
123  bool selectShift(const Instruction *I);
124  bool selectDivRem(const Instruction *I, unsigned ISDOpcode);
125 
126  // Utility helper routines.
127  bool isTypeLegal(Type *Ty, MVT &VT);
128  bool isTypeSupported(Type *Ty, MVT &VT);
129  bool isLoadTypeLegal(Type *Ty, MVT &VT);
130  bool computeAddress(const Value *Obj, Address &Addr);
131  bool computeCallAddress(const Value *V, Address &Addr);
132  void simplifyAddress(Address &Addr);
133 
134  // Emit helper routines.
135  bool emitCmp(unsigned DestReg, const CmpInst *CI);
136  bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
137  unsigned Alignment = 0);
138  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
139  MachineMemOperand *MMO = nullptr);
140  bool emitStore(MVT VT, unsigned SrcReg, Address &Addr,
141  unsigned Alignment = 0);
142  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
143  bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
145  bool IsZExt);
146  bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
147 
148  bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
149  bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
150  unsigned DestReg);
151  bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
152  unsigned DestReg);
153 
154  unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
155 
156  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
157  const Value *RHS);
158 
159  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
160  unsigned materializeGV(const GlobalValue *GV, MVT VT);
161  unsigned materializeInt(const Constant *C, MVT VT);
162  unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
 163  unsigned materializeExternalCallSym(MCSymbol *Sym);
164 
165  MachineInstrBuilder emitInst(unsigned Opc) {
166  return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
167  }
168  MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
169  return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
170  DstReg);
171  }
172  MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
173  unsigned MemReg, int64_t MemOffset) {
174  return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
175  }
176  MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
177  unsigned MemReg, int64_t MemOffset) {
178  return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
179  }
180 
181  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
182  const TargetRegisterClass *RC,
183  unsigned Op0, bool Op0IsKill,
184  unsigned Op1, bool Op1IsKill);
185 
 186  // For some reason, this default is not generated by tablegen,
 187  // so we explicitly generate it here.
188  //
189  unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
190  unsigned Op0, bool Op0IsKill, uint64_t imm1,
191  uint64_t imm2, unsigned Op3, bool Op3IsKill) {
192  return 0;
193  }
194 
195  // Call handling routines.
196 private:
197  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
198  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
199  unsigned &NumBytes);
200  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
201  const MipsABIInfo &getABI() const {
202  return static_cast<const MipsTargetMachine &>(TM).getABI();
203  }
204 
205 public:
206  // Backend specific FastISel code.
207  explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
208  const TargetLibraryInfo *libInfo)
209  : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
210  Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
211  TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
212  MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
213  Context = &funcInfo.Fn->getContext();
214  UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
215  }
216 
217  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
218  unsigned fastMaterializeConstant(const Constant *C) override;
219  bool fastSelectInstruction(const Instruction *I) override;
220 
221 #include "MipsGenFastISel.inc"
222 };
223 } // end anonymous namespace.
224 
225 static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
226  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
 227  CCState &State) LLVM_ATTRIBUTE_UNUSED;
 228 
229 static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
230  CCValAssign::LocInfo LocInfo,
231  ISD::ArgFlagsTy ArgFlags, CCState &State) {
232  llvm_unreachable("should not be called");
233 }
234 
235 static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
236  CCValAssign::LocInfo LocInfo,
237  ISD::ArgFlagsTy ArgFlags, CCState &State) {
238  llvm_unreachable("should not be called");
239 }
240 
241 #include "MipsGenCallingConv.inc"
242 
243 CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
244  return CC_MipsO32;
245 }
246 
247 unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
248  const Value *LHS, const Value *RHS) {
249  // Canonicalize immediates to the RHS first.
250  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
251  std::swap(LHS, RHS);
252 
253  unsigned Opc;
254  switch (ISDOpc) {
255  case ISD::AND:
256  Opc = Mips::AND;
257  break;
258  case ISD::OR:
259  Opc = Mips::OR;
260  break;
261  case ISD::XOR:
262  Opc = Mips::XOR;
263  break;
264  default:
265  llvm_unreachable("unexpected opcode");
266  }
267 
268  unsigned LHSReg = getRegForValue(LHS);
269  if (!LHSReg)
270  return 0;
271 
272  unsigned RHSReg;
273  if (const auto *C = dyn_cast<ConstantInt>(RHS))
274  RHSReg = materializeInt(C, MVT::i32);
275  else
276  RHSReg = getRegForValue(RHS);
277  if (!RHSReg)
278  return 0;
279 
280  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
281  if (!ResultReg)
282  return 0;
283 
284  emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
285  return ResultReg;
286 }
287 
288 unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
289  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
290  "Alloca should always return a pointer.");
291 
 292  DenseMap<const AllocaInst *, int>::iterator SI =
 293  FuncInfo.StaticAllocaMap.find(AI);
294 
295  if (SI != FuncInfo.StaticAllocaMap.end()) {
296  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
297  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
298  ResultReg)
299  .addFrameIndex(SI->second)
300  .addImm(0);
301  return ResultReg;
302  }
303 
304  return 0;
305 }
306 
307 unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
308  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
309  return 0;
310  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
311  const ConstantInt *CI = cast<ConstantInt>(C);
312  return materialize32BitInt(CI->getZExtValue(), RC);
313 }
314 
315 unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
316  const TargetRegisterClass *RC) {
317  unsigned ResultReg = createResultReg(RC);
318 
319  if (isInt<16>(Imm)) {
320  unsigned Opc = Mips::ADDiu;
321  emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
322  return ResultReg;
323  } else if (isUInt<16>(Imm)) {
324  emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
325  return ResultReg;
326  }
327  unsigned Lo = Imm & 0xFFFF;
328  unsigned Hi = (Imm >> 16) & 0xFFFF;
329  if (Lo) {
330  // Both Lo and Hi have nonzero bits.
331  unsigned TmpReg = createResultReg(RC);
332  emitInst(Mips::LUi, TmpReg).addImm(Hi);
333  emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
334  } else {
335  emitInst(Mips::LUi, ResultReg).addImm(Hi);
336  }
337  return ResultReg;
338 }
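// Worked example (editorial addition, register names illustrative): the three
// paths above produce, for concrete immediates,
//   Imm = -8      -> isInt<16>  -> ADDiu $res, $zero, -8
//   Imm = 0x8000  -> isUInt<16> -> ORi   $res, $zero, 0x8000
//   Imm = 0x12345 -> Hi = 0x0001, Lo = 0x2345:
//                    LUi $tmp, 0x0001      ; $tmp = 0x00010000
//                    ORi $res, $tmp, 0x2345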
339 
340 unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
341  if (UnsupportedFPMode)
342  return 0;
343  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
344  if (VT == MVT::f32) {
345  const TargetRegisterClass *RC = &Mips::FGR32RegClass;
346  unsigned DestReg = createResultReg(RC);
347  unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
348  emitInst(Mips::MTC1, DestReg).addReg(TempReg);
349  return DestReg;
350  } else if (VT == MVT::f64) {
351  const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
352  unsigned DestReg = createResultReg(RC);
353  unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
354  unsigned TempReg2 =
355  materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
356  emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
357  return DestReg;
358  }
359  return 0;
360 }
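// Illustrative example (editorial addition): for the f64 constant 1.0, whose
// bit pattern is 0x3FF0000000000000, the code above materializes
//   TempReg1 = 0x3FF00000 (high word), TempReg2 = 0x00000000 (low word)
// and pairs them with BuildPairF64 into a single AFGR64 register.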
361 
362 unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
363  // For now 32-bit only.
364  if (VT != MVT::i32)
365  return 0;
366  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
367  unsigned DestReg = createResultReg(RC);
368  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
369  bool IsThreadLocal = GVar && GVar->isThreadLocal();
370  // TLS not supported at this time.
371  if (IsThreadLocal)
372  return 0;
373  emitInst(Mips::LW, DestReg)
374  .addReg(MFI->getGlobalBaseReg())
375  .addGlobalAddress(GV, 0, MipsII::MO_GOT);
376  if ((GV->hasInternalLinkage() ||
377  (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
378  unsigned TempReg = createResultReg(RC);
379  emitInst(Mips::ADDiu, TempReg)
380  .addReg(DestReg)
381  .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
382  DestReg = TempReg;
383  }
384  return DestReg;
385 }
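// Illustrative note (editorial addition, PIC case): the global's address is
// loaded from the GOT via the global base register, roughly
//   LW    $d, %got(sym)($gp)
// and for internal/local non-function symbols the GOT entry is page-granular,
// so a second ADDiu with %lo(sym) is appended as shown above.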
386 
387 unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
388  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
389  unsigned DestReg = createResultReg(RC);
390  emitInst(Mips::LW, DestReg)
391  .addReg(MFI->getGlobalBaseReg())
392  .addSym(Sym, MipsII::MO_GOT);
393  return DestReg;
394 }
395 
396 // Materialize a constant into a register, and return the register
397 // number (or zero if we failed to handle it).
398 unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
399  EVT CEVT = TLI.getValueType(DL, C->getType(), true);
400 
401  // Only handle simple types.
402  if (!CEVT.isSimple())
403  return 0;
404  MVT VT = CEVT.getSimpleVT();
405 
406  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
407  return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
408  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
409  return materializeGV(GV, VT);
410  else if (isa<ConstantInt>(C))
411  return materializeInt(C, VT);
412 
413  return 0;
414 }
415 
416 bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
417 
418  const User *U = nullptr;
419  unsigned Opcode = Instruction::UserOp1;
420  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
421  // Don't walk into other basic blocks unless the object is an alloca from
422  // another block, otherwise it may not have a virtual register assigned.
423  if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
424  FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
425  Opcode = I->getOpcode();
426  U = I;
427  }
428  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
429  Opcode = C->getOpcode();
430  U = C;
431  }
432  switch (Opcode) {
433  default:
434  break;
435  case Instruction::BitCast: {
436  // Look through bitcasts.
437  return computeAddress(U->getOperand(0), Addr);
438  }
439  case Instruction::GetElementPtr: {
440  Address SavedAddr = Addr;
441  int64_t TmpOffset = Addr.getOffset();
442  // Iterate through the GEP folding the constants into offsets where
443  // we can.
 444  gep_type_iterator GTI = gep_type_begin(U);
 445  for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
446  ++i, ++GTI) {
447  const Value *Op = *i;
448  if (StructType *STy = GTI.getStructTypeOrNull()) {
449  const StructLayout *SL = DL.getStructLayout(STy);
450  unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
451  TmpOffset += SL->getElementOffset(Idx);
452  } else {
453  uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
454  for (;;) {
455  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
456  // Constant-offset addressing.
457  TmpOffset += CI->getSExtValue() * S;
458  break;
459  }
460  if (canFoldAddIntoGEP(U, Op)) {
461  // A compatible add with a constant operand. Fold the constant.
462  ConstantInt *CI =
463  cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
464  TmpOffset += CI->getSExtValue() * S;
465  // Iterate on the other operand.
466  Op = cast<AddOperator>(Op)->getOperand(0);
467  continue;
468  }
469  // Unsupported
470  goto unsupported_gep;
471  }
472  }
473  }
474  // Try to grab the base operand now.
475  Addr.setOffset(TmpOffset);
476  if (computeAddress(U->getOperand(0), Addr))
477  return true;
478  // We failed, restore everything and try the other options.
479  Addr = SavedAddr;
480  unsupported_gep:
481  break;
482  }
483  case Instruction::Alloca: {
484  const AllocaInst *AI = cast<AllocaInst>(Obj);
 485  DenseMap<const AllocaInst *, int>::iterator SI =
 486  FuncInfo.StaticAllocaMap.find(AI);
487  if (SI != FuncInfo.StaticAllocaMap.end()) {
488  Addr.setKind(Address::FrameIndexBase);
489  Addr.setFI(SI->second);
490  return true;
491  }
492  break;
493  }
494  }
495  Addr.setReg(getRegForValue(Obj));
496  return Addr.getReg() != 0;
497 }
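// Worked example (editorial addition): for IR such as
//   %p = getelementptr inbounds [10 x i32], [10 x i32]* %buf, i32 0, i32 3
// the GEP case above folds the constant indices into TmpOffset
// (0 * 40 + 3 * 4 = 12) and then recurses on %buf; if %buf is a static alloca
// the result is a FrameIndexBase address with Offset = 12.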
498 
499 bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
500  const User *U = nullptr;
501  unsigned Opcode = Instruction::UserOp1;
502 
503  if (const auto *I = dyn_cast<Instruction>(V)) {
504  // Check if the value is defined in the same basic block. This information
505  // is crucial to know whether or not folding an operand is valid.
506  if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
507  Opcode = I->getOpcode();
508  U = I;
509  }
510  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
511  Opcode = C->getOpcode();
512  U = C;
513  }
514 
515  switch (Opcode) {
516  default:
517  break;
518  case Instruction::BitCast:
519  // Look past bitcasts if its operand is in the same BB.
520  return computeCallAddress(U->getOperand(0), Addr);
521  break;
522  case Instruction::IntToPtr:
523  // Look past no-op inttoptrs if its operand is in the same BB.
524  if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
525  TLI.getPointerTy(DL))
526  return computeCallAddress(U->getOperand(0), Addr);
527  break;
528  case Instruction::PtrToInt:
529  // Look past no-op ptrtoints if its operand is in the same BB.
530  if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
531  return computeCallAddress(U->getOperand(0), Addr);
532  break;
533  }
534 
535  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
536  Addr.setGlobalValue(GV);
537  return true;
538  }
539 
540  // If all else fails, try to materialize the value in a register.
541  if (!Addr.getGlobalValue()) {
542  Addr.setReg(getRegForValue(V));
543  return Addr.getReg() != 0;
544  }
545 
546  return false;
547 }
548 
549 bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
550  EVT evt = TLI.getValueType(DL, Ty, true);
551  // Only handle simple types.
552  if (evt == MVT::Other || !evt.isSimple())
553  return false;
554  VT = evt.getSimpleVT();
555 
556  // Handle all legal types, i.e. a register that will directly hold this
557  // value.
558  return TLI.isTypeLegal(VT);
559 }
560 
561 bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
562  if (Ty->isVectorTy())
563  return false;
564 
565  if (isTypeLegal(Ty, VT))
566  return true;
567 
 568  // If this is a type that can be sign- or zero-extended to a basic operation,
 569  // go ahead and accept it now.
570  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
571  return true;
572 
573  return false;
574 }
575 
576 bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
577  if (isTypeLegal(Ty, VT))
578  return true;
579  // We will extend this in a later patch:
 580  // If this is a type that can be sign- or zero-extended to a basic operation,
 581  // go ahead and accept it now.
582  if (VT == MVT::i8 || VT == MVT::i16)
583  return true;
584  return false;
585 }
586 // Because of how EmitCmp is called with fast-isel, you can
587 // end up with redundant "andi" instructions after the sequences emitted below.
588 // We should try to solve this issue in the future.
589 //
590 bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
591  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
592  bool IsUnsigned = CI->isUnsigned();
593  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
594  if (LeftReg == 0)
595  return false;
596  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
597  if (RightReg == 0)
598  return false;
 599  CmpInst::Predicate P = CI->getPredicate();
 600 
601  switch (P) {
602  default:
603  return false;
604  case CmpInst::ICMP_EQ: {
605  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
606  emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
607  emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
608  break;
609  }
610  case CmpInst::ICMP_NE: {
611  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
612  emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
613  emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
614  break;
615  }
616  case CmpInst::ICMP_UGT: {
617  emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
618  break;
619  }
620  case CmpInst::ICMP_ULT: {
621  emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
622  break;
623  }
624  case CmpInst::ICMP_UGE: {
625  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
626  emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
627  emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
628  break;
629  }
630  case CmpInst::ICMP_ULE: {
631  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
632  emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
633  emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
634  break;
635  }
636  case CmpInst::ICMP_SGT: {
637  emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
638  break;
639  }
640  case CmpInst::ICMP_SLT: {
641  emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
642  break;
643  }
644  case CmpInst::ICMP_SGE: {
645  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
646  emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
647  emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
648  break;
649  }
650  case CmpInst::ICMP_SLE: {
651  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
652  emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
653  emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
654  break;
655  }
656  case CmpInst::FCMP_OEQ:
657  case CmpInst::FCMP_UNE:
658  case CmpInst::FCMP_OLT:
659  case CmpInst::FCMP_OLE:
660  case CmpInst::FCMP_OGT:
661  case CmpInst::FCMP_OGE: {
662  if (UnsupportedFPMode)
663  return false;
664  bool IsFloat = Left->getType()->isFloatTy();
665  bool IsDouble = Left->getType()->isDoubleTy();
666  if (!IsFloat && !IsDouble)
667  return false;
668  unsigned Opc, CondMovOpc;
669  switch (P) {
670  case CmpInst::FCMP_OEQ:
671  Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
672  CondMovOpc = Mips::MOVT_I;
673  break;
674  case CmpInst::FCMP_UNE:
675  Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
676  CondMovOpc = Mips::MOVF_I;
677  break;
678  case CmpInst::FCMP_OLT:
679  Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
680  CondMovOpc = Mips::MOVT_I;
681  break;
682  case CmpInst::FCMP_OLE:
683  Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
684  CondMovOpc = Mips::MOVT_I;
685  break;
686  case CmpInst::FCMP_OGT:
687  Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
688  CondMovOpc = Mips::MOVF_I;
689  break;
690  case CmpInst::FCMP_OGE:
691  Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
692  CondMovOpc = Mips::MOVF_I;
693  break;
694  default:
695  llvm_unreachable("Only switching of a subset of CCs.");
696  }
697  unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
698  unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
699  emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
700  emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
701  emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
702  .addReg(RightReg);
703  emitInst(CondMovOpc, ResultReg)
704  .addReg(RegWithOne)
705  .addReg(Mips::FCC0)
706  .addReg(RegWithZero);
707  break;
708  }
709  }
710  return true;
711 }
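// Worked example (editorial addition): MIPS only has "set on less than", so
// the >=/<= predicates above are synthesized from SLT/SLTu plus XORi, e.g.
// for (icmp uge %a, %b):
//   SLTu $t, $a, $b    ; $t = (a < b)
//   XORi $res, $t, 1   ; $res = !(a < b) = (a >= b)
// and ICMP_EQ uses XOR followed by SLTiu 1, which tests (a ^ b) == 0.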
712 bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
713  unsigned Alignment) {
714  //
715  // more cases will be handled here in following patches.
716  //
717  unsigned Opc;
718  switch (VT.SimpleTy) {
719  case MVT::i32: {
720  ResultReg = createResultReg(&Mips::GPR32RegClass);
721  Opc = Mips::LW;
722  break;
723  }
724  case MVT::i16: {
725  ResultReg = createResultReg(&Mips::GPR32RegClass);
726  Opc = Mips::LHu;
727  break;
728  }
729  case MVT::i8: {
730  ResultReg = createResultReg(&Mips::GPR32RegClass);
731  Opc = Mips::LBu;
732  break;
733  }
734  case MVT::f32: {
735  if (UnsupportedFPMode)
736  return false;
737  ResultReg = createResultReg(&Mips::FGR32RegClass);
738  Opc = Mips::LWC1;
739  break;
740  }
741  case MVT::f64: {
742  if (UnsupportedFPMode)
743  return false;
744  ResultReg = createResultReg(&Mips::AFGR64RegClass);
745  Opc = Mips::LDC1;
746  break;
747  }
748  default:
749  return false;
750  }
751  if (Addr.isRegBase()) {
752  simplifyAddress(Addr);
753  emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
754  return true;
755  }
756  if (Addr.isFIBase()) {
757  unsigned FI = Addr.getFI();
758  unsigned Align = 4;
759  int64_t Offset = Addr.getOffset();
760  MachineFrameInfo &MFI = MF->getFrameInfo();
761  MachineMemOperand *MMO = MF->getMachineMemOperand(
 762  MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
 763  MFI.getObjectSize(FI), Align);
764  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
765  .addFrameIndex(FI)
766  .addImm(Offset)
767  .addMemOperand(MMO);
768  return true;
769  }
770  return false;
771 }
772 
773 bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
774  unsigned Alignment) {
775  //
776  // more cases will be handled here in following patches.
777  //
778  unsigned Opc;
779  switch (VT.SimpleTy) {
780  case MVT::i8:
781  Opc = Mips::SB;
782  break;
783  case MVT::i16:
784  Opc = Mips::SH;
785  break;
786  case MVT::i32:
787  Opc = Mips::SW;
788  break;
789  case MVT::f32:
790  if (UnsupportedFPMode)
791  return false;
792  Opc = Mips::SWC1;
793  break;
794  case MVT::f64:
795  if (UnsupportedFPMode)
796  return false;
797  Opc = Mips::SDC1;
798  break;
799  default:
800  return false;
801  }
802  if (Addr.isRegBase()) {
803  simplifyAddress(Addr);
804  emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
805  return true;
806  }
807  if (Addr.isFIBase()) {
808  unsigned FI = Addr.getFI();
809  unsigned Align = 4;
810  int64_t Offset = Addr.getOffset();
811  MachineFrameInfo &MFI = MF->getFrameInfo();
812  MachineMemOperand *MMO = MF->getMachineMemOperand(
 813  MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
 814  MFI.getObjectSize(FI), Align);
815  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
816  .addReg(SrcReg)
817  .addFrameIndex(FI)
818  .addImm(Offset)
819  .addMemOperand(MMO);
820  return true;
821  }
822  return false;
823 }
824 
825 bool MipsFastISel::selectLogicalOp(const Instruction *I) {
826  MVT VT;
827  if (!isTypeSupported(I->getType(), VT))
828  return false;
829 
830  unsigned ResultReg;
831  switch (I->getOpcode()) {
832  default:
833  llvm_unreachable("Unexpected instruction.");
834  case Instruction::And:
835  ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
836  break;
837  case Instruction::Or:
838  ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
839  break;
840  case Instruction::Xor:
841  ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
842  break;
843  }
844 
845  if (!ResultReg)
846  return false;
847 
848  updateValueMap(I, ResultReg);
849  return true;
850 }
851 
852 bool MipsFastISel::selectLoad(const Instruction *I) {
853  // Atomic loads need special handling.
854  if (cast<LoadInst>(I)->isAtomic())
855  return false;
856 
857  // Verify we have a legal type before going any further.
858  MVT VT;
859  if (!isLoadTypeLegal(I->getType(), VT))
860  return false;
861 
862  // See if we can handle this address.
863  Address Addr;
864  if (!computeAddress(I->getOperand(0), Addr))
865  return false;
866 
867  unsigned ResultReg;
868  if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
869  return false;
870  updateValueMap(I, ResultReg);
871  return true;
872 }
873 
874 bool MipsFastISel::selectStore(const Instruction *I) {
875  Value *Op0 = I->getOperand(0);
876  unsigned SrcReg = 0;
877 
878  // Atomic stores need special handling.
879  if (cast<StoreInst>(I)->isAtomic())
880  return false;
881 
882  // Verify we have a legal type before going any further.
883  MVT VT;
884  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
885  return false;
886 
887  // Get the value to be stored into a register.
888  SrcReg = getRegForValue(Op0);
889  if (SrcReg == 0)
890  return false;
891 
892  // See if we can handle this address.
893  Address Addr;
894  if (!computeAddress(I->getOperand(1), Addr))
895  return false;
896 
897  if (!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
898  return false;
899  return true;
900 }
901 
902 //
903 // This can cause a redundant sltiu to be generated.
904 // FIXME: try and eliminate this in a future patch.
905 //
906 bool MipsFastISel::selectBranch(const Instruction *I) {
907  const BranchInst *BI = cast<BranchInst>(I);
908  MachineBasicBlock *BrBB = FuncInfo.MBB;
909  //
910  // TBB is the basic block for the case where the comparison is true.
911  // FBB is the basic block for the case where the comparison is false.
912  // if (cond) goto TBB
913  // goto FBB
914  // TBB:
915  //
916  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
917  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
918  BI->getCondition();
919  // For now, just try the simplest case where it's fed by a compare.
920  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
921  unsigned CondReg = createResultReg(&Mips::GPR32RegClass);
922  if (!emitCmp(CondReg, CI))
923  return false;
924  BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
925  .addReg(CondReg)
926  .addMBB(TBB);
927  finishCondBranch(BI->getParent(), TBB, FBB);
928  return true;
929  }
930  return false;
931 }
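// Illustrative example (editorial addition): for IR like
//   %c = icmp eq i32 %x, %y
//   br i1 %c, label %tbb, label %fbb
// the code above materializes the emitCmp result (0 or 1) into $cond and emits
//   BGTZ $cond, tbb
// while finishCondBranch records the fall-through edge to %fbb.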
932 
933 bool MipsFastISel::selectCmp(const Instruction *I) {
934  const CmpInst *CI = cast<CmpInst>(I);
935  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
936  if (!emitCmp(ResultReg, CI))
937  return false;
938  updateValueMap(I, ResultReg);
939  return true;
940 }
941 
942 // Attempt to fast-select a floating-point extend instruction.
943 bool MipsFastISel::selectFPExt(const Instruction *I) {
944  if (UnsupportedFPMode)
945  return false;
946  Value *Src = I->getOperand(0);
947  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
948  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
949 
950  if (SrcVT != MVT::f32 || DestVT != MVT::f64)
951  return false;
952 
953  unsigned SrcReg =
 954  getRegForValue(Src); // This must be in a 32-bit floating-point register
 955  // class; maybe we should handle this differently.
956  if (!SrcReg)
957  return false;
958 
959  unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
960  emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
961  updateValueMap(I, DestReg);
962  return true;
963 }
964 
965 bool MipsFastISel::selectSelect(const Instruction *I) {
966  assert(isa<SelectInst>(I) && "Expected a select instruction.");
967 
968  DEBUG(dbgs() << "selectSelect\n");
969 
970  MVT VT;
971  if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
972  DEBUG(dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
973  return false;
974  }
975 
976  unsigned CondMovOpc;
977  const TargetRegisterClass *RC;
978 
979  if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
980  CondMovOpc = Mips::MOVN_I_I;
981  RC = &Mips::GPR32RegClass;
982  } else if (VT == MVT::f32) {
983  CondMovOpc = Mips::MOVN_I_S;
984  RC = &Mips::FGR32RegClass;
985  } else if (VT == MVT::f64) {
986  CondMovOpc = Mips::MOVN_I_D32;
987  RC = &Mips::AFGR64RegClass;
988  } else
989  return false;
990 
991  const SelectInst *SI = cast<SelectInst>(I);
992  const Value *Cond = SI->getCondition();
993  unsigned Src1Reg = getRegForValue(SI->getTrueValue());
994  unsigned Src2Reg = getRegForValue(SI->getFalseValue());
995  unsigned CondReg = getRegForValue(Cond);
996 
997  if (!Src1Reg || !Src2Reg || !CondReg)
998  return false;
999 
1000  unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
1001  if (!ZExtCondReg)
1002  return false;
1003 
1004  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
1005  return false;
1006 
1007  unsigned ResultReg = createResultReg(RC);
1008  unsigned TempReg = createResultReg(RC);
1009 
1010  if (!ResultReg || !TempReg)
1011  return false;
1012 
1013  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
1014  emitInst(CondMovOpc, ResultReg)
1015  .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
1016  updateValueMap(I, ResultReg);
1017  return true;
1018 }
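// Illustrative example (editorial addition): "select i1 %c, i32 %a, i32 %b"
// becomes a conditional move on the zero-extended condition, roughly
//   ANDi     $zc, $c, 1            ; emitIntExt of the i1 condition
//   COPY     $tmp, $b
//   MOVN_I_I $res, $a, $zc, $tmp   ; $res = ($zc != 0) ? $a : $tmp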
1019 
1020 // Attempt to fast-select a floating-point truncate instruction.
1021 bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1022  if (UnsupportedFPMode)
1023  return false;
1024  Value *Src = I->getOperand(0);
1025  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1026  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1027 
1028  if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1029  return false;
1030 
1031  unsigned SrcReg = getRegForValue(Src);
1032  if (!SrcReg)
1033  return false;
1034 
1035  unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
1036  if (!DestReg)
1037  return false;
1038 
1039  emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1040  updateValueMap(I, DestReg);
1041  return true;
1042 }
1043 
1044 // Attempt to fast-select a floating-point-to-integer conversion.
1045 bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1046  if (UnsupportedFPMode)
1047  return false;
1048  MVT DstVT, SrcVT;
1049  if (!IsSigned)
1050  return false; // We don't handle this case yet. There is no native
1051  // instruction for this but it can be synthesized.
1052  Type *DstTy = I->getType();
1053  if (!isTypeLegal(DstTy, DstVT))
1054  return false;
1055 
1056  if (DstVT != MVT::i32)
1057  return false;
1058 
1059  Value *Src = I->getOperand(0);
1060  Type *SrcTy = Src->getType();
1061  if (!isTypeLegal(SrcTy, SrcVT))
1062  return false;
1063 
1064  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1065  return false;
1066 
1067  unsigned SrcReg = getRegForValue(Src);
1068  if (SrcReg == 0)
1069  return false;
1070 
1071  // Determine the opcode for the conversion, which takes place
1072  // entirely within FPRs.
1073  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1074  unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
1075  unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1076 
1077  // Generate the convert.
1078  emitInst(Opc, TempReg).addReg(SrcReg);
1079  emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1080 
1081  updateValueMap(I, DestReg);
1082  return true;
1083 }
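// Illustrative note (editorial addition): the signed conversion above stays
// inside the FPU and is then moved out, e.g. for f32 -> i32:
//   TRUNC.W.S $ftmp, $fsrc   ; round toward zero, result held in an FGR32
//   MFC1      $dst, $ftmp    ; move the 32-bit integer result into a GPR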
1084 
1085 bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
1086  SmallVectorImpl<MVT> &OutVTs,
1087  unsigned &NumBytes) {
1088  CallingConv::ID CC = CLI.CallConv;
 1089  SmallVector<CCValAssign, 16> ArgLocs;
 1090  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
1091  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
1092  // Get a count of how many bytes are to be pushed on the stack.
1093  NumBytes = CCInfo.getNextStackOffset();
1094  // This is the minimum argument area used for A0-A3.
1095  if (NumBytes < 16)
1096  NumBytes = 16;
1097 
1098  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16);
1099  // Process the args.
1100  MVT firstMVT;
1101  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1102  CCValAssign &VA = ArgLocs[i];
1103  const Value *ArgVal = CLI.OutVals[VA.getValNo()];
1104  MVT ArgVT = OutVTs[VA.getValNo()];
1105 
1106  if (i == 0) {
1107  firstMVT = ArgVT;
1108  if (ArgVT == MVT::f32) {
1109  VA.convertToReg(Mips::F12);
1110  } else if (ArgVT == MVT::f64) {
1111  VA.convertToReg(Mips::D6);
1112  }
1113  } else if (i == 1) {
1114  if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
1115  if (ArgVT == MVT::f32) {
1116  VA.convertToReg(Mips::F14);
1117  } else if (ArgVT == MVT::f64) {
1118  VA.convertToReg(Mips::D7);
1119  }
1120  }
1121  }
1122  if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
1123  (ArgVT == MVT::i8)) &&
1124  VA.isMemLoc()) {
1125  switch (VA.getLocMemOffset()) {
1126  case 0:
1127  VA.convertToReg(Mips::A0);
1128  break;
1129  case 4:
1130  VA.convertToReg(Mips::A1);
1131  break;
1132  case 8:
1133  VA.convertToReg(Mips::A2);
1134  break;
1135  case 12:
1136  VA.convertToReg(Mips::A3);
1137  break;
1138  default:
1139  break;
1140  }
1141  }
1142  unsigned ArgReg = getRegForValue(ArgVal);
1143  if (!ArgReg)
1144  return false;
1145 
1146  // Handle arg promotion: SExt, ZExt, AExt.
1147  switch (VA.getLocInfo()) {
1148  case CCValAssign::Full:
1149  break;
1150  case CCValAssign::AExt:
1151  case CCValAssign::SExt: {
1152  MVT DestVT = VA.getLocVT();
1153  MVT SrcVT = ArgVT;
1154  ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
1155  if (!ArgReg)
1156  return false;
1157  break;
1158  }
1159  case CCValAssign::ZExt: {
1160  MVT DestVT = VA.getLocVT();
1161  MVT SrcVT = ArgVT;
1162  ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
1163  if (!ArgReg)
1164  return false;
1165  break;
1166  }
1167  default:
1168  llvm_unreachable("Unknown arg promotion!");
1169  }
1170 
1171  // Now copy/store arg to correct locations.
1172  if (VA.isRegLoc() && !VA.needsCustom()) {
1173  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1174  TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
1175  CLI.OutRegs.push_back(VA.getLocReg());
1176  } else if (VA.needsCustom()) {
1177  llvm_unreachable("Mips does not use custom args.");
1178  return false;
1179  } else {
1180  //
1181  // FIXME: This path will currently return false. It was copied
1182  // from the AArch64 port and should be essentially fine for Mips too.
1183  // The work to finish up this path will be done in a follow-on patch.
1184  //
1185  assert(VA.isMemLoc() && "Assuming store on stack.");
1186  // Don't emit stores for undef values.
1187  if (isa<UndefValue>(ArgVal))
1188  continue;
1189 
1190  // Need to store on the stack.
1191  // FIXME: This alignment is incorrect but this path is disabled
1192  // for now (will return false). We need to determine the right alignment
1193  // based on the normal alignment for the underlying machine type.
1194  //
1195  unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
1196 
1197  unsigned BEAlign = 0;
1198  if (ArgSize < 8 && !Subtarget->isLittle())
1199  BEAlign = 8 - ArgSize;
1200 
1201  Address Addr;
1202  Addr.setKind(Address::RegBase);
1203  Addr.setReg(Mips::SP);
1204  Addr.setOffset(VA.getLocMemOffset() + BEAlign);
1205 
1206  unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
1207  MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
1208  MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
1209  MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
1210  (void)(MMO);
1211  // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
1212  return false; // can't store on the stack yet.
1213  }
1214  }
1215 
1216  return true;
1217 }
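// Editorial summary of the O32 argument mapping implemented above:
//   - the first 16 bytes of integer-sized arguments map to A0..A3 by their
//     stack offset (0 -> A0, 4 -> A1, 8 -> A2, 12 -> A3);
//   - a leading f32/f64 argument goes in F12/D6 instead, and a second leading
//     floating-point argument goes in F14/D7;
//   - arguments that would actually have to be stored to the stack make this
//     function return false, falling back to SelectionDAG for now.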
1218 
1219 bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
1220  unsigned NumBytes) {
1221  CallingConv::ID CC = CLI.CallConv;
1222  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
1223  if (RetVT != MVT::isVoid) {
 1224  SmallVector<CCValAssign, 16> RVLocs;
 1225  CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
1226  CCInfo.AnalyzeCallResult(RetVT, RetCC_Mips);
1227 
1228  // Only handle a single return value.
1229  if (RVLocs.size() != 1)
1230  return false;
1231  // Copy all of the result registers out of their specified physreg.
1232  MVT CopyVT = RVLocs[0].getValVT();
1233  // Special handling for extended integers.
1234  if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
1235  CopyVT = MVT::i32;
1236 
1237  unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
1238  if (!ResultReg)
1239  return false;
1240  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1241  TII.get(TargetOpcode::COPY),
1242  ResultReg).addReg(RVLocs[0].getLocReg());
1243  CLI.InRegs.push_back(RVLocs[0].getLocReg());
1244 
1245  CLI.ResultReg = ResultReg;
1246  CLI.NumResultRegs = 1;
1247  }
1248  return true;
1249 }
1250 
1251 bool MipsFastISel::fastLowerArguments() {
1252  DEBUG(dbgs() << "fastLowerArguments\n");
1253 
1254  if (!FuncInfo.CanLowerReturn) {
1255  DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
1256  return false;
1257  }
1258 
1259  const Function *F = FuncInfo.Fn;
1260  if (F->isVarArg()) {
1261  DEBUG(dbgs() << ".. gave up (varargs)\n");
1262  return false;
1263  }
1264 
1265  CallingConv::ID CC = F->getCallingConv();
1266  if (CC != CallingConv::C) {
1267  DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
1268  return false;
1269  }
1270 
1271  const ArrayRef<MCPhysReg> GPR32ArgRegs = {Mips::A0, Mips::A1, Mips::A2,
1272  Mips::A3};
1273  const ArrayRef<MCPhysReg> FGR32ArgRegs = {Mips::F12, Mips::F14};
1274  const ArrayRef<MCPhysReg> AFGR64ArgRegs = {Mips::D6, Mips::D7};
1275  ArrayRef<MCPhysReg>::iterator NextGPR32 = GPR32ArgRegs.begin();
1276  ArrayRef<MCPhysReg>::iterator NextFGR32 = FGR32ArgRegs.begin();
1277  ArrayRef<MCPhysReg>::iterator NextAFGR64 = AFGR64ArgRegs.begin();
1278 
1279  struct AllocatedReg {
1280  const TargetRegisterClass *RC;
1281  unsigned Reg;
1282  AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
1283  : RC(RC), Reg(Reg) {}
1284  };
1285 
1286  // Only handle simple cases. i.e. All arguments are directly mapped to
1287  // registers of the appropriate type.
 1288  SmallVector<AllocatedReg, 4> Allocation;
 1289  unsigned Idx = 1;
1290  for (const auto &FormalArg : F->args()) {
1291  if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
1292  F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
1293  F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
1294  DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
1295  return false;
1296  }
1297 
1298  Type *ArgTy = FormalArg.getType();
1299  if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
1300  DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
1301  return false;
1302  }
1303 
1304  EVT ArgVT = TLI.getValueType(DL, ArgTy);
1305  DEBUG(dbgs() << ".. " << (Idx - 1) << ": " << ArgVT.getEVTString() << "\n");
1306  if (!ArgVT.isSimple()) {
1307  DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
1308  return false;
1309  }
1310 
1311  switch (ArgVT.getSimpleVT().SimpleTy) {
1312  case MVT::i1:
1313  case MVT::i8:
1314  case MVT::i16:
1315  if (!F->getAttributes().hasAttribute(Idx, Attribute::SExt) &&
1316  !F->getAttributes().hasAttribute(Idx, Attribute::ZExt)) {
 1317  // It must be an any-extend; this shouldn't happen for clang-generated IR,
 1318  // so just fall back on SelectionDAG.
1319  DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
1320  return false;
1321  }
1322 
1323  if (NextGPR32 == GPR32ArgRegs.end()) {
1324  DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1325  return false;
1326  }
1327 
1328  DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1329  Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1330 
1331  // Allocating any GPR32 prohibits further use of floating point arguments.
1332  NextFGR32 = FGR32ArgRegs.end();
1333  NextAFGR64 = AFGR64ArgRegs.end();
1334  break;
1335 
1336  case MVT::i32:
1337  if (F->getAttributes().hasAttribute(Idx, Attribute::ZExt)) {
1338  // The O32 ABI does not permit a zero-extended i32.
1339  DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
1340  return false;
1341  }
1342 
1343  if (NextGPR32 == GPR32ArgRegs.end()) {
1344  DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1345  return false;
1346  }
1347 
1348  DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1349  Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1350 
1351  // Allocating any GPR32 prohibits further use of floating point arguments.
1352  NextFGR32 = FGR32ArgRegs.end();
1353  NextAFGR64 = AFGR64ArgRegs.end();
1354  break;
1355 
1356  case MVT::f32:
1357  if (UnsupportedFPMode) {
1358  DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1359  return false;
1360  }
1361  if (NextFGR32 == FGR32ArgRegs.end()) {
1362  DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
1363  return false;
1364  }
1365  DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
1366  Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
1367  // Allocating an FGR32 also allocates the super-register AFGR64, and
1368  // ABI rules require us to skip the corresponding GPR32.
1369  if (NextGPR32 != GPR32ArgRegs.end())
1370  NextGPR32++;
1371  if (NextAFGR64 != AFGR64ArgRegs.end())
1372  NextAFGR64++;
1373  break;
1374 
1375  case MVT::f64:
1376  if (UnsupportedFPMode) {
1377  DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1378  return false;
1379  }
1380  if (NextAFGR64 == AFGR64ArgRegs.end()) {
1381  DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
1382  return false;
1383  }
1384  DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
1385  Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
 1386  // Allocating an AFGR64 also allocates its FGR32 sub-registers, and
 1387  // ABI rules require us to skip the corresponding GPR32 pair.
1388  if (NextGPR32 != GPR32ArgRegs.end())
1389  NextGPR32++;
1390  if (NextGPR32 != GPR32ArgRegs.end())
1391  NextGPR32++;
1392  if (NextFGR32 != FGR32ArgRegs.end())
1393  NextFGR32++;
1394  break;
1395 
1396  default:
1397  DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
1398  return false;
1399  }
1400 
1401  ++Idx;
1402  }
1403 
1404  Idx = 0;
1405  for (const auto &FormalArg : F->args()) {
1406  unsigned SrcReg = Allocation[Idx].Reg;
1407  unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[Idx].RC);
1408  // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
1409  // Without this, EmitLiveInCopies may eliminate the livein if its only
1410  // use is a bitcast (which isn't turned into an instruction).
1411  unsigned ResultReg = createResultReg(Allocation[Idx].RC);
1412  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1413  TII.get(TargetOpcode::COPY), ResultReg)
1414  .addReg(DstReg, getKillRegState(true));
1415  updateValueMap(&FormalArg, ResultReg);
1416  ++Idx;
1417  }
1418 
1419  // Calculate the size of the incoming arguments area.
1420  // We currently reject all the cases where this would be non-zero.
1421  unsigned IncomingArgSizeInBytes = 0;
1422 
1423  // Account for the reserved argument area on ABI's that have one (O32).
1424  // It seems strange to do this on the caller side but it's necessary in
1425  // SelectionDAG's implementation.
1426  IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
1427  IncomingArgSizeInBytes);
1428 
1429  MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
1430  false);
1431 
1432  return true;
1433 }
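// Illustrative example (editorial addition): for
//   define void @f(double %a, float %b, i32 %c)
// the allocation loop above assigns %a -> D6 (skipping A0/A1 and F12),
// %b -> F14 (skipping A2 and D7) and %c -> A3, matching the O32 convention;
// placing an integer argument before a floating-point one instead exhausts
// the FP iterators and the whole function falls back to SelectionDAG.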
1434 
1435 bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
1436  CallingConv::ID CC = CLI.CallConv;
1437  bool IsTailCall = CLI.IsTailCall;
1438  bool IsVarArg = CLI.IsVarArg;
1439  const Value *Callee = CLI.Callee;
1440  MCSymbol *Symbol = CLI.Symbol;
1441 
1442  // Do not handle FastCC.
1443  if (CC == CallingConv::Fast)
1444  return false;
1445 
1446  // Allow SelectionDAG isel to handle tail calls.
1447  if (IsTailCall)
1448  return false;
1449 
1450  // Let SDISel handle vararg functions.
1451  if (IsVarArg)
1452  return false;
1453 
1454  // FIXME: Only handle *simple* calls for now.
1455  MVT RetVT;
1456  if (CLI.RetTy->isVoidTy())
1457  RetVT = MVT::isVoid;
1458  else if (!isTypeSupported(CLI.RetTy, RetVT))
1459  return false;
1460 
1461  for (auto Flag : CLI.OutFlags)
1462  if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
1463  return false;
1464 
1465  // Set up the argument vectors.
1466  SmallVector<MVT, 16> OutVTs;
1467  OutVTs.reserve(CLI.OutVals.size());
1468 
1469  for (auto *Val : CLI.OutVals) {
1470  MVT VT;
1471  if (!isTypeLegal(Val->getType(), VT) &&
1472  !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
1473  return false;
1474 
1475  // We don't handle vector parameters yet.
1476  if (VT.isVector() || VT.getSizeInBits() > 64)
1477  return false;
1478 
1479  OutVTs.push_back(VT);
1480  }
1481 
1482  Address Addr;
1483  if (!computeCallAddress(Callee, Addr))
1484  return false;
1485 
1486  // Handle the arguments now that we've gotten them.
1487  unsigned NumBytes;
1488  if (!processCallArgs(CLI, OutVTs, NumBytes))
1489  return false;
1490 
1491  if (!Addr.getGlobalValue())
1492  return false;
1493 
1494  // Issue the call.
1495  unsigned DestAddress;
1496  if (Symbol)
1497  DestAddress = materializeExternalCallSym(Symbol);
1498  else
1499  DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
1500  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
1501  MachineInstrBuilder MIB =
1502  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
1503  Mips::RA).addReg(Mips::T9);
1504 
1505  // Add implicit physical register uses to the call.
1506  for (auto Reg : CLI.OutRegs)
 1507  MIB.addReg(Reg, RegState::Implicit);
 1508 
1509  // Add a register mask with the call-preserved registers.
1510  // Proper defs for return values will be added by setPhysRegsDeadExcept().
1511  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
1512 
1513  CLI.Call = MIB;
1514 
1515  // Finish off the call including any return values.
1516  return finishCall(CLI, RetVT, NumBytes);
1517 }
1518 
1519 bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
1520  switch (II->getIntrinsicID()) {
1521  default:
1522  return false;
1523  case Intrinsic::bswap: {
1524  Type *RetTy = II->getCalledFunction()->getReturnType();
1525 
1526  MVT VT;
1527  if (!isTypeSupported(RetTy, VT))
1528  return false;
1529 
1530  unsigned SrcReg = getRegForValue(II->getOperand(0));
1531  if (SrcReg == 0)
1532  return false;
1533  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1534  if (DestReg == 0)
1535  return false;
1536  if (VT == MVT::i16) {
1537  if (Subtarget->hasMips32r2()) {
1538  emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
1539  updateValueMap(II, DestReg);
1540  return true;
1541  } else {
1542  unsigned TempReg[3];
1543  for (int i = 0; i < 3; i++) {
1544  TempReg[i] = createResultReg(&Mips::GPR32RegClass);
1545  if (TempReg[i] == 0)
1546  return false;
1547  }
1548  emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
1549  emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
1550  emitInst(Mips::OR, TempReg[2]).addReg(TempReg[0]).addReg(TempReg[1]);
1551  emitInst(Mips::ANDi, DestReg).addReg(TempReg[2]).addImm(0xFFFF);
1552  updateValueMap(II, DestReg);
1553  return true;
1554  }
1555  } else if (VT == MVT::i32) {
1556  if (Subtarget->hasMips32r2()) {
1557  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1558  emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
1559  emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
1560  updateValueMap(II, DestReg);
1561  return true;
1562  } else {
1563  unsigned TempReg[8];
1564  for (int i = 0; i < 8; i++) {
1565  TempReg[i] = createResultReg(&Mips::GPR32RegClass);
1566  if (TempReg[i] == 0)
1567  return false;
1568  }
1569 
1570  emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
1571  emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
1572  emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
1573  emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);
1574 
1575  emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
1576  emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);
1577 
1578  emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
1579  emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
1580  emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
1581  updateValueMap(II, DestReg);
1582  return true;
1583  }
1584  }
1585  return false;
1586  }
1587  case Intrinsic::memcpy:
1588  case Intrinsic::memmove: {
1589  const auto *MTI = cast<MemTransferInst>(II);
1590  // Don't handle volatile.
1591  if (MTI->isVolatile())
1592  return false;
1593  if (!MTI->getLength()->getType()->isIntegerTy(32))
1594  return false;
1595  const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
1596  return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
1597  }
1598  case Intrinsic::memset: {
1599  const MemSetInst *MSI = cast<MemSetInst>(II);
1600  // Don't handle volatile.
1601  if (MSI->isVolatile())
1602  return false;
1603  if (!MSI->getLength()->getType()->isIntegerTy(32))
1604  return false;
1605  return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
1606  }
1607  }
1608  return false;
1609 }
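// Worked example (editorial addition) of the pre-MIPS32r2 i32 bswap fallback
// above, for SrcReg = 0x11223344 (TempReg[i] written as t0..t7):
//   SRL  t0, src, 8       ; 0x00112233
//   SRL  t1, src, 24      ; 0x00000011
//   ANDi t2, t0, 0xFF00   ; 0x00002200
//   OR   t3, t1, t2       ; 0x00002211
//   ANDi t4, src, 0xFF00  ; 0x00003300
//   SLL  t5, t4, 8        ; 0x00330000
//   SLL  t6, src, 24      ; 0x44000000
//   OR   t7, t3, t5       ; 0x00332211
//   OR   dst, t6, t7      ; 0x44332211 == bswap(0x11223344)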
1610 
1611 bool MipsFastISel::selectRet(const Instruction *I) {
1612  const Function &F = *I->getParent()->getParent();
1613  const ReturnInst *Ret = cast<ReturnInst>(I);
1614 
1615  DEBUG(dbgs() << "selectRet\n");
1616 
1617  if (!FuncInfo.CanLowerReturn)
1618  return false;
1619 
1620  // Build a list of return value registers.
1621  SmallVector<unsigned, 4> RetRegs;
1622 
1623  if (Ret->getNumOperands() > 0) {
 1624  CallingConv::ID CC = F.getCallingConv();
 1625 
1626  // Do not handle FastCC.
1627  if (CC == CallingConv::Fast)
1628  return false;
1629 
 1630  SmallVector<ISD::OutputArg, 4> Outs;
 1631  GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1632 
1633  // Analyze operands of the call, assigning locations to each operand.
 1634  SmallVector<CCValAssign, 16> ValLocs;
 1635  MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
1636  I->getContext());
1637  CCAssignFn *RetCC = RetCC_Mips;
1638  CCInfo.AnalyzeReturn(Outs, RetCC);
1639 
1640  // Only handle a single return value for now.
1641  if (ValLocs.size() != 1)
1642  return false;
1643 
1644  CCValAssign &VA = ValLocs[0];
1645  const Value *RV = Ret->getOperand(0);
1646 
1647  // Don't bother handling odd stuff for now.
1648  if ((VA.getLocInfo() != CCValAssign::Full) &&
1649  (VA.getLocInfo() != CCValAssign::BCvt))
1650  return false;
1651 
1652  // Only handle register returns for now.
1653  if (!VA.isRegLoc())
1654  return false;
1655 
1656  unsigned Reg = getRegForValue(RV);
1657  if (Reg == 0)
1658  return false;
1659 
1660  unsigned SrcReg = Reg + VA.getValNo();
1661  unsigned DestReg = VA.getLocReg();
1662  // Avoid a cross-class copy. This is very unlikely.
1663  if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1664  return false;
1665 
1666  EVT RVEVT = TLI.getValueType(DL, RV->getType());
1667  if (!RVEVT.isSimple())
1668  return false;
1669 
1670  if (RVEVT.isVector())
1671  return false;
1672 
1673  MVT RVVT = RVEVT.getSimpleVT();
1674  if (RVVT == MVT::f128)
1675  return false;
1676 
1677  // Do not handle FGR64 returns for now.
1678  if (RVVT == MVT::f64 && UnsupportedFPMode) {
 1679  DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1680  return false;
1681  }
1682 
1683  MVT DestVT = VA.getValVT();
1684  // Special handling for extended integers.
1685  if (RVVT != DestVT) {
1686  if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1687  return false;
1688 
1689  if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
1690  bool IsZExt = Outs[0].Flags.isZExt();
1691  SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
1692  if (SrcReg == 0)
1693  return false;
1694  }
1695  }
1696 
1697  // Make the copy.
1698  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1699  TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
1700 
1701  // Add register to return instruction.
1702  RetRegs.push_back(VA.getLocReg());
1703  }
1704  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
1705  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1706  MIB.addReg(RetRegs[i], RegState::Implicit);
1707  return true;
1708 }
1709 
1710 bool MipsFastISel::selectTrunc(const Instruction *I) {
1711  // The high bits for a type smaller than the register size are assumed to be
1712  // undefined.
1713  Value *Op = I->getOperand(0);
1714 
1715  EVT SrcVT, DestVT;
1716  SrcVT = TLI.getValueType(DL, Op->getType(), true);
1717  DestVT = TLI.getValueType(DL, I->getType(), true);
1718 
1719  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1720  return false;
1721  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1722  return false;
1723 
1724  unsigned SrcReg = getRegForValue(Op);
1725  if (!SrcReg)
1726  return false;
1727 
1728  // Because the high bits are undefined, a truncate doesn't generate
1729  // any code.
1730  updateValueMap(I, SrcReg);
1731  return true;
1732 }
1733 bool MipsFastISel::selectIntExt(const Instruction *I) {
1734  Type *DestTy = I->getType();
1735  Value *Src = I->getOperand(0);
1736  Type *SrcTy = Src->getType();
1737 
1738  bool isZExt = isa<ZExtInst>(I);
1739  unsigned SrcReg = getRegForValue(Src);
1740  if (!SrcReg)
1741  return false;
1742 
1743  EVT SrcEVT, DestEVT;
1744  SrcEVT = TLI.getValueType(DL, SrcTy, true);
1745  DestEVT = TLI.getValueType(DL, DestTy, true);
1746  if (!SrcEVT.isSimple())
1747  return false;
1748  if (!DestEVT.isSimple())
1749  return false;
1750 
1751  MVT SrcVT = SrcEVT.getSimpleVT();
1752  MVT DestVT = DestEVT.getSimpleVT();
1753  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1754 
1755  if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1756  return false;
1757  updateValueMap(I, ResultReg);
1758  return true;
1759 }
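// Sign-extend an i8/i16 value without SEB/SEH: shift the value left until its
// sign bit reaches bit 31, then arithmetic-shift it back down.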
1760 bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1761  unsigned DestReg) {
1762  unsigned ShiftAmt;
1763  switch (SrcVT.SimpleTy) {
1764  default:
1765  return false;
1766  case MVT::i8:
1767  ShiftAmt = 24;
1768  break;
1769  case MVT::i16:
1770  ShiftAmt = 16;
1771  break;
1772  }
1773  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1774  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1775  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1776  return true;
1777 }
1778 
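// On MIPS32r2 and later, SEB/SEH sign-extend a byte or halfword in a single
// instruction.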
1779 bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1780  unsigned DestReg) {
1781  switch (SrcVT.SimpleTy) {
1782  default:
1783  return false;
1784  case MVT::i8:
1785  emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1786  break;
1787  case MVT::i16:
1788  emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1789  break;
1790  }
1791  return true;
1792 }
1793 
1794 bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1795  unsigned DestReg) {
1796  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1797  return false;
1798  if (Subtarget->hasMips32r2())
1799  return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1800  return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1801 }
1802 
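// Zero-extend by masking off the upper bits with an immediate AND
// (i1 -> 0x1, i8 -> 0xff, i16 -> 0xffff).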
1803 bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1804  unsigned DestReg) {
1805  int64_t Imm;
1806 
1807  switch (SrcVT.SimpleTy) {
1808  default:
1809  return false;
1810  case MVT::i1:
1811  Imm = 1;
1812  break;
1813  case MVT::i8:
1814  Imm = 0xff;
1815  break;
1816  case MVT::i16:
1817  Imm = 0xffff;
1818  break;
1819  }
1820 
1821  emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1822  return true;
1823 }
1824 
1825 bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1826  unsigned DestReg, bool IsZExt) {
1827  // FastISel does not have plumbing to deal with extensions where the SrcVT or
1828  // DestVT are odd things, so test to make sure that they are both types we can
1829  // handle (i1/i8/i16 for SrcVT and i8/i16/i32 for DestVT), otherwise
1830  // bail out to SelectionDAG.
1831  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1832  ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1833  return false;
1834  if (IsZExt)
1835  return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1836  return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1837 }
1838 
1839 unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1840  bool isZExt) {
1841  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1842  bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1843  return Success ? DestReg : 0;
1844 }
1845 
1846 bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1847  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1848  if (!DestEVT.isSimple())
1849  return false;
1850 
1851  MVT DestVT = DestEVT.getSimpleVT();
1852  if (DestVT != MVT::i32)
1853  return false;
1854 
1855  unsigned DivOpc;
1856  switch (ISDOpcode) {
1857  default:
1858  return false;
1859  case ISD::SDIV:
1860  case ISD::SREM:
1861  DivOpc = Mips::SDIV;
1862  break;
1863  case ISD::UDIV:
1864  case ISD::UREM:
1865  DivOpc = Mips::UDIV;
1866  break;
1867  }
1868 
1869  unsigned Src0Reg = getRegForValue(I->getOperand(0));
1870  unsigned Src1Reg = getRegForValue(I->getOperand(1));
1871  if (!Src0Reg || !Src1Reg)
1872  return false;
1873 
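 // The divide writes the quotient to LO and the remainder to HI; the TEQ against
 // $zero traps if the divisor is zero (7 is the conventional divide-by-zero trap
 // code).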
1874  emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
1875  emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1876 
1877  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1878  if (!ResultReg)
1879  return false;
1880 
1881  unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1882  ? Mips::MFHI
1883  : Mips::MFLO;
1884  emitInst(MFOpc, ResultReg);
1885 
1886  updateValueMap(I, ResultReg);
1887  return true;
1888 }
1889 
1890 bool MipsFastISel::selectShift(const Instruction *I) {
1891  MVT RetVT;
1892 
1893  if (!isTypeSupported(I->getType(), RetVT))
1894  return false;
1895 
1896  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1897  if (!ResultReg)
1898  return false;
1899 
1900  unsigned Opcode = I->getOpcode();
1901  const Value *Op0 = I->getOperand(0);
1902  unsigned Op0Reg = getRegForValue(Op0);
1903  if (!Op0Reg)
1904  return false;
1905 
1906  // If AShr or LShr, extend operand 0 to 32 bits first (sign-extend for AShr, zero-extend for LShr).
1907  if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1908  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1909  if (!TempReg)
1910  return false;
1911 
1912  MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1913  bool IsZExt = Opcode == Instruction::LShr;
1914  if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
1915  return false;
1916 
1917  Op0Reg = TempReg;
1918  }
1919 
1920  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
1921  uint64_t ShiftVal = C->getZExtValue();
1922 
1923  switch (Opcode) {
1924  default:
1925  llvm_unreachable("Unexpected instruction.");
1926  case Instruction::Shl:
1927  Opcode = Mips::SLL;
1928  break;
1929  case Instruction::AShr:
1930  Opcode = Mips::SRA;
1931  break;
1932  case Instruction::LShr:
1933  Opcode = Mips::SRL;
1934  break;
1935  }
1936 
1937  emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
1938  updateValueMap(I, ResultReg);
1939  return true;
1940  }
1941 
1942  unsigned Op1Reg = getRegForValue(I->getOperand(1));
1943  if (!Op1Reg)
1944  return false;
1945 
1946  switch (Opcode) {
1947  default:
1948  llvm_unreachable("Unexpected instruction.");
1949  case Instruction::Shl:
1950  Opcode = Mips::SLLV;
1951  break;
1952  case Instruction::AShr:
1953  Opcode = Mips::SRAV;
1954  break;
1955  case Instruction::LShr:
1956  Opcode = Mips::SRLV;
1957  break;
1958  }
1959 
1960  emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
1961  updateValueMap(I, ResultReg);
1962  return true;
1963 }
1964 
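// Dispatch on the IR opcode; returning false means fast-isel could not handle
// the instruction and selection falls back to SelectionDAG.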
1965 bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
1966  switch (I->getOpcode()) {
1967  default:
1968  break;
1969  case Instruction::Load:
1970  return selectLoad(I);
1971  case Instruction::Store:
1972  return selectStore(I);
1973  case Instruction::SDiv:
1974  if (!selectBinaryOp(I, ISD::SDIV))
1975  return selectDivRem(I, ISD::SDIV);
1976  return true;
1977  case Instruction::UDiv:
1978  if (!selectBinaryOp(I, ISD::UDIV))
1979  return selectDivRem(I, ISD::UDIV);
1980  return true;
1981  case Instruction::SRem:
1982  if (!selectBinaryOp(I, ISD::SREM))
1983  return selectDivRem(I, ISD::SREM);
1984  return true;
1985  case Instruction::URem:
1986  if (!selectBinaryOp(I, ISD::UREM))
1987  return selectDivRem(I, ISD::UREM);
1988  return true;
1989  case Instruction::Shl:
1990  case Instruction::LShr:
1991  case Instruction::AShr:
1992  return selectShift(I);
1993  case Instruction::And:
1994  case Instruction::Or:
1995  case Instruction::Xor:
1996  return selectLogicalOp(I);
1997  case Instruction::Br:
1998  return selectBranch(I);
1999  case Instruction::Ret:
2000  return selectRet(I);
2001  case Instruction::Trunc:
2002  return selectTrunc(I);
2003  case Instruction::ZExt:
2004  case Instruction::SExt:
2005  return selectIntExt(I);
2006  case Instruction::FPTrunc:
2007  return selectFPTrunc(I);
2008  case Instruction::FPExt:
2009  return selectFPExt(I);
2010  case Instruction::FPToSI:
2011  return selectFPToInt(I, /*isSigned*/ true);
2012  case Instruction::FPToUI:
2013  return selectFPToInt(I, /*isSigned*/ false);
2014  case Instruction::ICmp:
2015  case Instruction::FCmp:
2016  return selectCmp(I);
2017  case Instruction::Select:
2018  return selectSelect(I);
2019  }
2020  return false;
2021 }
2022 
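// Return a register containing V, first widening i8/i16 values to i32
// (zero-extending when IsUnsigned is true, sign-extending otherwise).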
2023 unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2024  bool IsUnsigned) {
2025  unsigned VReg = getRegForValue(V);
2026  if (VReg == 0)
2027  return 0;
2028  MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2029  if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2030  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
2031  if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2032  return 0;
2033  VReg = TempReg;
2034  }
2035  return VReg;
2036 }
2037 
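// MIPS load/store instructions only encode a signed 16-bit displacement, so an
// offset that does not fit is materialized into a register and added to the base.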
2038 void MipsFastISel::simplifyAddress(Address &Addr) {
2039  if (!isInt<16>(Addr.getOffset())) {
2040  unsigned TempReg =
2041  materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2042  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
2043  emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2044  Addr.setReg(DestReg);
2045  Addr.setOffset(0);
2046  }
2047 }
2048 
2049 unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2050  const TargetRegisterClass *RC,
2051  unsigned Op0, bool Op0IsKill,
2052  unsigned Op1, bool Op1IsKill) {
2053  // We treat the MUL instruction in a special way because it clobbers
2054  // the HI0 & LO0 registers. The TableGen definition of this instruction can
2055  // mark these registers only as implicitly defined. As a result, the
2056  // register allocator runs out of registers when this instruction is
2057  // followed by another instruction that defines the same registers too.
2058  // We can fix this by explicitly marking those registers as dead.
2059  if (MachineInstOpcode == Mips::MUL) {
2060  unsigned ResultReg = createResultReg(RC);
2061  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2062  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2063  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2064  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2065  .addReg(Op0, getKillRegState(Op0IsKill))
2066  .addReg(Op1, getKillRegState(Op1IsKill))
2067  .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
2068  .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
2069  return ResultReg;
2070  }
2071 
2072  return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1,
2073  Op1IsKill);
2074 }
2075 
2076 namespace llvm {
2077 FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
2078  const TargetLibraryInfo *libInfo) {
2079  return new MipsFastISel(funcInfo, libInfo);
2080 }
2081 }