LLVM 22.0.0git
MipsInstructionSelector.cpp
Go to the documentation of this file.
1//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// Mips.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"
22
23#define DEBUG_TYPE "mips-isel"
24
25using namespace llvm;
26
27namespace {
28
29#define GET_GLOBALISEL_PREDICATE_BITSET
30#include "MipsGenGlobalISel.inc"
31#undef GET_GLOBALISEL_PREDICATE_BITSET
32
33class MipsInstructionSelector : public InstructionSelector {
34public:
35 MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
36 const MipsRegisterBankInfo &RBI);
37
38 bool select(MachineInstr &I) override;
39 static const char *getName() { return DEBUG_TYPE; }
40
41private:
42 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
43 bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
44 bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
45 bool materialize32BitImm(Register DestReg, APInt Imm,
46 MachineIRBuilder &B) const;
49 getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
50 unsigned selectLoadStoreOpCode(MachineInstr &I,
52 bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
53 MachineOperand &BaseAddr, unsigned Offset,
54 MachineMemOperand *MMO) const;
55 bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
56 MachineOperand &BaseAddr, unsigned Offset,
57 Register TiedDest, MachineMemOperand *MMO) const;
58
59 const MipsTargetMachine &TM;
60 const MipsSubtarget &STI;
61 const MipsInstrInfo &TII;
62 const MipsRegisterInfo &TRI;
63 const MipsRegisterBankInfo &RBI;
64
65#define GET_GLOBALISEL_PREDICATES_DECL
66#include "MipsGenGlobalISel.inc"
67#undef GET_GLOBALISEL_PREDICATES_DECL
68
69#define GET_GLOBALISEL_TEMPORARIES_DECL
70#include "MipsGenGlobalISel.inc"
71#undef GET_GLOBALISEL_TEMPORARIES_DECL
72};
73
74} // end anonymous namespace
75
76#define GET_GLOBALISEL_IMPL
77#include "MipsGenGlobalISel.inc"
78#undef GET_GLOBALISEL_IMPL
79
80MipsInstructionSelector::MipsInstructionSelector(
81 const MipsTargetMachine &TM, const MipsSubtarget &STI,
82 const MipsRegisterBankInfo &RBI)
83 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
84 RBI(RBI),
85
87#include "MipsGenGlobalISel.inc"
90#include "MipsGenGlobalISel.inc"
92{
93}
94
95bool MipsInstructionSelector::isRegInGprb(Register Reg,
96 MachineRegisterInfo &MRI) const {
97 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
98}
99
100bool MipsInstructionSelector::isRegInFprb(Register Reg,
101 MachineRegisterInfo &MRI) const {
102 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
103}
104
105bool MipsInstructionSelector::selectCopy(MachineInstr &I,
106 MachineRegisterInfo &MRI) const {
107 Register DstReg = I.getOperand(0).getReg();
108 if (DstReg.isPhysical())
109 return true;
110
111 const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
112 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
113 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
114 << " operand\n");
115 return false;
116 }
117 return true;
118}
119
120const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
121 Register Reg, MachineRegisterInfo &MRI) const {
122 const LLT Ty = MRI.getType(Reg);
123 const unsigned TySize = Ty.getSizeInBits();
124
125 if (isRegInGprb(Reg, MRI)) {
126 assert((Ty.isScalar() || Ty.isPointer()) &&
127 (TySize == 32 || TySize == 64) &&
128 "Register class not available for LLT, register bank combination");
129 if (TySize == 32)
130 return &Mips::GPR32RegClass;
131 if (TySize == 64)
132 return &Mips::GPR64RegClass;
133 }
134
135 if (isRegInFprb(Reg, MRI)) {
136 if (Ty.isScalar()) {
137 assert((TySize == 32 || TySize == 64) &&
138 "Register class not available for LLT, register bank combination");
139 if (TySize == 32)
140 return &Mips::FGR32RegClass;
141 return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
142 }
143 }
144
145 llvm_unreachable("Unsupported register bank.");
146}
147
148bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
149 MachineIRBuilder &B) const {
150 assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
151 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
152 if (Imm.getHiBits(16).isZero()) {
153 MachineInstr *Inst =
154 B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
155 .addImm(Imm.getLoBits(16).getLimitedValue());
156 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
157 }
158 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
159 if (Imm.getLoBits(16).isZero()) {
160 MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
161 .addImm(Imm.getHiBits(16).getLimitedValue());
162 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
163 }
164 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
165 if (Imm.isSignedIntN(16)) {
166 MachineInstr *Inst =
167 B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
168 .addImm(Imm.getLoBits(16).getLimitedValue());
169 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
170 }
171 // Values that cannot be materialized with single immediate instruction.
172 Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
173 MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
174 .addImm(Imm.getHiBits(16).getLimitedValue());
175 MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
176 .addImm(Imm.getLoBits(16).getLimitedValue());
177 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
178 return false;
179 if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
180 return false;
181 return true;
182}
183
/// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
///
/// Maps a generic load/store onto a concrete Mips opcode based on the value
/// register's bank, its LLT, and the memory access size recorded in the
/// instruction's (single) memory operand.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  // Size in bytes of the memory access (may be narrower than the register
  // for extending loads / truncating stores).
  const unsigned MemSizeInBytes =
      (*I.memoperands_begin())->getSize().getValue();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  // Integer bank: SW/SH/SB stores, LW plus sign/zero-extending sub-word loads.
  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // Unspecified extending load is selected into zeroExtending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  // Floating-point bank: scalar FP loads/stores, or MSA vector loads/stores.
  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      // 64-bit FP access: opcode depends on the FP register mode.
      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      // MSA opcode is picked by element width (ST_B..ST_D / LD_B..LD_D).
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  // Signal failure by returning the generic opcode unchanged.
  return Opc;
}
261
262bool MipsInstructionSelector::buildUnalignedStore(
263 MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
264 MachineMemOperand *MMO) const {
265 MachineInstr *NewInst =
266 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
267 .add(I.getOperand(0))
268 .add(BaseAddr)
269 .addImm(Offset)
270 .addMemOperand(MMO);
271 if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
272 return false;
273 return true;
274}
275
276bool MipsInstructionSelector::buildUnalignedLoad(
277 MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
278 unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
279 MachineInstr *NewInst =
280 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
281 .addDef(Dest)
282 .add(BaseAddr)
283 .addImm(Offset)
284 .addUse(TiedDest)
285 .addMemOperand(*I.memoperands_begin());
286 if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
287 return false;
288 return true;
289}
290
291bool MipsInstructionSelector::select(MachineInstr &I) {
292
293 MachineBasicBlock &MBB = *I.getParent();
294 MachineFunction &MF = *MBB.getParent();
295 MachineRegisterInfo &MRI = MF.getRegInfo();
296
297 if (!isPreISelGenericOpcode(I.getOpcode())) {
298 if (I.isCopy())
299 return selectCopy(I, MRI);
300
301 return true;
302 }
303
304 if (I.getOpcode() == Mips::G_MUL &&
305 isRegInGprb(I.getOperand(0).getReg(), MRI)) {
306 MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
307 .add(I.getOperand(0))
308 .add(I.getOperand(1))
309 .add(I.getOperand(2));
311 return false;
312 Mul->getOperand(3).setIsDead(true);
313 Mul->getOperand(4).setIsDead(true);
314
315 I.eraseFromParent();
316 return true;
317 }
318
319 if (selectImpl(I, *CoverageInfo))
320 return true;
321
322 MachineInstr *MI = nullptr;
323 using namespace TargetOpcode;
324
325 switch (I.getOpcode()) {
326 case G_UMULH: {
327 Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
328 MachineInstr *PseudoMULTu, *PseudoMove;
329
330 PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
331 .addDef(PseudoMULTuReg)
332 .add(I.getOperand(1))
333 .add(I.getOperand(2));
334 if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
335 return false;
336
337 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
338 .addDef(I.getOperand(0).getReg())
339 .addUse(PseudoMULTuReg);
340 if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
341 return false;
342
343 I.eraseFromParent();
344 return true;
345 }
346 case G_PTR_ADD: {
347 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
348 .add(I.getOperand(0))
349 .add(I.getOperand(1))
350 .add(I.getOperand(2));
351 break;
352 }
353 case G_INTTOPTR:
354 case G_PTRTOINT: {
355 I.setDesc(TII.get(COPY));
356 return selectCopy(I, MRI);
357 }
358 case G_FRAME_INDEX: {
359 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
360 .add(I.getOperand(0))
361 .add(I.getOperand(1))
362 .addImm(0);
363 break;
364 }
365 case G_BRJT: {
366 unsigned EntrySize =
368 assert(isPowerOf2_32(EntrySize) &&
369 "Non-power-of-two jump-table entry size not supported.");
370
371 Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
372 MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
373 .addDef(JTIndex)
374 .addUse(I.getOperand(2).getReg())
375 .addImm(Log2_32(EntrySize));
376 if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
377 return false;
378
379 Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
380 MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
381 .addDef(DestAddress)
382 .addUse(I.getOperand(0).getReg())
383 .addUse(JTIndex);
384 if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
385 return false;
386
387 Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
388 MachineInstr *LW =
389 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
390 .addDef(Dest)
391 .addUse(DestAddress)
392 .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
394 MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
396 return false;
397
398 if (MF.getTarget().isPositionIndependent()) {
399 Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
400 LW->getOperand(0).setReg(DestTmp);
401 MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
402 .addDef(Dest)
403 .addUse(DestTmp)
404 .addUse(MF.getInfo<MipsFunctionInfo>()
405 ->getGlobalBaseRegForGlobalISel(MF));
406 if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
407 return false;
408 }
409
410 MachineInstr *Branch =
411 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
412 .addUse(Dest);
413 if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
414 return false;
415
416 I.eraseFromParent();
417 return true;
418 }
419 case G_BRINDIRECT: {
420 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
421 .add(I.getOperand(0));
422 break;
423 }
424 case G_PHI: {
425 const Register DestReg = I.getOperand(0).getReg();
426
427 const TargetRegisterClass *DefRC = nullptr;
428 if (DestReg.isPhysical())
429 DefRC = TRI.getRegClass(DestReg);
430 else
431 DefRC = getRegClassForTypeOnBank(DestReg, MRI);
432
433 I.setDesc(TII.get(TargetOpcode::PHI));
434 return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
435 }
436 case G_STORE:
437 case G_LOAD:
438 case G_ZEXTLOAD:
439 case G_SEXTLOAD: {
440 auto MMO = *I.memoperands_begin();
441 MachineOperand BaseAddr = I.getOperand(1);
442 int64_t SignedOffset = 0;
443 // Try to fold load/store + G_PTR_ADD + G_CONSTANT
444 // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
445 // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
446 // %LoadResult/%StoreSrc = load/store %Addr(p0)
447 // into:
448 // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
449
450 MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
451 if (Addr->getOpcode() == G_PTR_ADD) {
452 MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
453 if (Offset->getOpcode() == G_CONSTANT) {
454 APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
455 if (OffsetValue.isSignedIntN(16)) {
456 BaseAddr = Addr->getOperand(1);
457 SignedOffset = OffsetValue.getSExtValue();
458 }
459 }
460 }
461
462 // Unaligned memory access
463 if ((!MMO->getSize().hasValue() ||
464 MMO->getAlign() < MMO->getSize().getValue()) &&
466 if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
467 return false;
468
469 if (I.getOpcode() == G_STORE) {
470 if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
471 return false;
472 if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
473 return false;
474 I.eraseFromParent();
475 return true;
476 }
477
478 if (I.getOpcode() == G_LOAD) {
479 Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
480 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
481 .addDef(ImplDef);
482 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
483 if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
484 ImplDef, MMO))
485 return false;
486 if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
487 BaseAddr, SignedOffset, Tmp, MMO))
488 return false;
489 I.eraseFromParent();
490 return true;
491 }
492
493 return false;
494 }
495
496 const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
497 if (NewOpc == I.getOpcode())
498 return false;
499
500 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
501 .add(I.getOperand(0))
502 .add(BaseAddr)
503 .addImm(SignedOffset)
504 .addMemOperand(MMO);
505 break;
506 }
507 case G_UDIV:
508 case G_UREM:
509 case G_SDIV:
510 case G_SREM: {
511 Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
512 bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
513 bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
514
515 MachineInstr *PseudoDIV, *PseudoMove;
516 PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
517 TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
518 .addDef(HILOReg)
519 .add(I.getOperand(1))
520 .add(I.getOperand(2));
521 if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
522 return false;
523
524 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
525 TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
526 .addDef(I.getOperand(0).getReg())
527 .addUse(HILOReg);
528 if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
529 return false;
530
531 I.eraseFromParent();
532 return true;
533 }
534 case G_SELECT: {
535 // Handle operands with pointer type.
536 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
537 .add(I.getOperand(0))
538 .add(I.getOperand(2))
539 .add(I.getOperand(1))
540 .add(I.getOperand(3));
541 break;
542 }
543 case G_UNMERGE_VALUES: {
544 if (I.getNumOperands() != 3)
545 return false;
546 Register Src = I.getOperand(2).getReg();
547 Register Lo = I.getOperand(0).getReg();
548 Register Hi = I.getOperand(1).getReg();
549 if (!isRegInFprb(Src, MRI) ||
550 !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
551 return false;
552
553 unsigned Opcode =
554 STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;
555
556 MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
557 .addDef(Lo)
558 .addUse(Src)
559 .addImm(0);
560 if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
561 return false;
562
563 MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
564 .addDef(Hi)
565 .addUse(Src)
566 .addImm(1);
567 if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
568 return false;
569
570 I.eraseFromParent();
571 return true;
572 }
573 case G_IMPLICIT_DEF: {
574 Register Dst = I.getOperand(0).getReg();
575 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
576 .addDef(Dst);
577
578 // Set class based on register bank, there can be fpr and gpr implicit def.
579 MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
580 break;
581 }
582 case G_CONSTANT: {
583 MachineIRBuilder B(I);
584 if (!materialize32BitImm(I.getOperand(0).getReg(),
585 I.getOperand(1).getCImm()->getValue(), B))
586 return false;
587
588 I.eraseFromParent();
589 return true;
590 }
591 case G_FCONSTANT: {
592 const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
593 APInt APImm = FPimm.bitcastToAPInt();
594 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
595
596 if (Size == 32) {
597 Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
598 MachineIRBuilder B(I);
599 if (!materialize32BitImm(GPRReg, APImm, B))
600 return false;
601
602 MachineInstrBuilder MTC1 =
603 B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
604 if (!MTC1.constrainAllUses(TII, TRI, RBI))
605 return false;
606 }
607 if (Size == 64) {
608 Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
609 Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
610 MachineIRBuilder B(I);
611 if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
612 return false;
613 if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
614 return false;
615
616 MachineInstrBuilder PairF64 = B.buildInstr(
617 STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
618 {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
619 if (!PairF64.constrainAllUses(TII, TRI, RBI))
620 return false;
621 }
622
623 I.eraseFromParent();
624 return true;
625 }
626 case G_FABS: {
627 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
628 unsigned FABSOpcode =
629 Size == 32 ? Mips::FABS_S
630 : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
631 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
632 .add(I.getOperand(0))
633 .add(I.getOperand(1));
634 break;
635 }
636 case G_FPTOSI: {
637 unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
638 unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
639 (void)ToSize;
640 assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
641 assert((FromSize == 32 || FromSize == 64) &&
642 "Unsupported floating point size for G_FPTOSI");
643
644 unsigned Opcode;
645 if (FromSize == 32)
646 Opcode = Mips::TRUNC_W_S;
647 else
648 Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
649 Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
650 MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
651 .addDef(ResultInFPR)
652 .addUse(I.getOperand(1).getReg());
653 if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
654 return false;
655
656 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
657 .addDef(I.getOperand(0).getReg())
658 .addUse(ResultInFPR);
659 if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
660 return false;
661
662 I.eraseFromParent();
663 return true;
664 }
665 case G_GLOBAL_VALUE: {
666 const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
667 if (MF.getTarget().isPositionIndependent()) {
668 MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
669 .addDef(I.getOperand(0).getReg())
670 .addReg(MF.getInfo<MipsFunctionInfo>()
671 ->getGlobalBaseRegForGlobalISel(MF))
672 .addGlobalAddress(GVal);
673 // Global Values that don't have local linkage are handled differently
674 // when they are part of call sequence. MipsCallLowering::lowerCall
675 // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
676 // MO_GOT_CALL flag when Callee doesn't have local linkage.
677 if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
679 else
681 LWGOT->addMemOperand(
684 if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
685 return false;
686
687 if (GVal->hasLocalLinkage()) {
688 Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
689 LWGOT->getOperand(0).setReg(LWGOTDef);
690
691 MachineInstr *ADDiu =
692 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
693 .addDef(I.getOperand(0).getReg())
694 .addReg(LWGOTDef)
695 .addGlobalAddress(GVal);
697 if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
698 return false;
699 }
700 } else {
701 Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
702
703 MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
704 .addDef(LUiReg)
705 .addGlobalAddress(GVal);
707 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
708 return false;
709
710 MachineInstr *ADDiu =
711 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
712 .addDef(I.getOperand(0).getReg())
713 .addUse(LUiReg)
714 .addGlobalAddress(GVal);
716 if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
717 return false;
718 }
719 I.eraseFromParent();
720 return true;
721 }
722 case G_JUMP_TABLE: {
723 if (MF.getTarget().isPositionIndependent()) {
724 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
725 .addDef(I.getOperand(0).getReg())
726 .addReg(MF.getInfo<MipsFunctionInfo>()
727 ->getGlobalBaseRegForGlobalISel(MF))
728 .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
731 Align(4)));
732 } else {
733 MI =
734 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
735 .addDef(I.getOperand(0).getReg())
736 .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
737 }
738 break;
739 }
740 case G_ICMP: {
741 struct Instr {
742 unsigned Opcode;
744 Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
745 : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};
746
747 bool hasImm() const {
748 if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
749 return true;
750 return false;
751 }
752 };
753
755 Register ICMPReg = I.getOperand(0).getReg();
756 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
757 Register LHS = I.getOperand(2).getReg();
758 Register RHS = I.getOperand(3).getReg();
760 static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
761
762 switch (Cond) {
763 case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
764 Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
765 Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
766 break;
767 case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
768 Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
769 Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
770 break;
771 case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
772 Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
773 break;
774 case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
775 Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
776 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
777 break;
778 case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
779 Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
780 break;
781 case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
782 Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
783 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
784 break;
785 case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
786 Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
787 break;
788 case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
789 Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
790 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
791 break;
792 case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
793 Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
794 break;
795 case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
796 Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
797 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
798 break;
799 default:
800 return false;
801 }
802
803 MachineIRBuilder B(I);
804 for (const struct Instr &Instruction : Instructions) {
805 MachineInstrBuilder MIB = B.buildInstr(
806 Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});
807
808 if (Instruction.hasImm())
809 MIB.addImm(Instruction.RHS);
810 else
811 MIB.addUse(Instruction.RHS);
812
813 if (!MIB.constrainAllUses(TII, TRI, RBI))
814 return false;
815 }
816
817 I.eraseFromParent();
818 return true;
819 }
820 case G_FCMP: {
821 unsigned MipsFCMPCondCode;
822 bool isLogicallyNegated;
823 switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
824 I.getOperand(1).getPredicate())) {
825 case CmpInst::FCMP_UNO: // Unordered
826 case CmpInst::FCMP_ORD: // Ordered (OR)
827 MipsFCMPCondCode = Mips::FCOND_UN;
828 isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
829 break;
830 case CmpInst::FCMP_OEQ: // Equal
831 case CmpInst::FCMP_UNE: // Not Equal (NEQ)
832 MipsFCMPCondCode = Mips::FCOND_OEQ;
833 isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
834 break;
835 case CmpInst::FCMP_UEQ: // Unordered or Equal
836 case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
837 MipsFCMPCondCode = Mips::FCOND_UEQ;
838 isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
839 break;
840 case CmpInst::FCMP_OLT: // Ordered or Less Than
841 case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
842 MipsFCMPCondCode = Mips::FCOND_OLT;
843 isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
844 break;
845 case CmpInst::FCMP_ULT: // Unordered or Less Than
846 case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
847 MipsFCMPCondCode = Mips::FCOND_ULT;
848 isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
849 break;
850 case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
851 case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
852 MipsFCMPCondCode = Mips::FCOND_OLE;
853 isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
854 break;
855 case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
856 case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
857 MipsFCMPCondCode = Mips::FCOND_ULE;
858 isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
859 break;
860 default:
861 return false;
862 }
863
864 // Default compare result in gpr register will be `true`.
865 // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
866 // using MOVF_I. When orignal predicate (Cond) is logically negated
867 // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
868 unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;
869
870 Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
871 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
872 .addDef(TrueInReg)
873 .addUse(Mips::ZERO)
874 .addImm(1);
875
876 unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
877 unsigned FCMPOpcode =
878 Size == 32 ? Mips::FCMP_S32
879 : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
880 MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
881 .addUse(I.getOperand(2).getReg())
882 .addUse(I.getOperand(3).getReg())
883 .addImm(MipsFCMPCondCode);
884 if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
885 return false;
886
887 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
888 .addDef(I.getOperand(0).getReg())
889 .addUse(Mips::ZERO)
890 .addUse(Mips::FCC0)
891 .addUse(TrueInReg);
892 if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
893 return false;
894
895 I.eraseFromParent();
896 return true;
897 }
898 case G_FENCE: {
899 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
900 break;
901 }
902 case G_VASTART: {
903 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
904 int FI = FuncInfo->getVarArgsFrameIndex();
905
906 Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
907 MachineInstr *LEA_ADDiu =
908 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
909 .addDef(LeaReg)
910 .addFrameIndex(FI)
911 .addImm(0);
912 if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
913 return false;
914
915 MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
916 .addUse(LeaReg)
917 .addUse(I.getOperand(0).getReg())
918 .addImm(0);
919 if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
920 return false;
921
922 I.eraseFromParent();
923 return true;
924 }
925 default:
926 return false;
927 }
928
929 I.eraseFromParent();
931}
932
933namespace llvm {
934InstructionSelector *
936 const MipsSubtarget &Subtarget,
937 const MipsRegisterBankInfo &RBI) {
938 return new MipsInstructionSelector(TM, Subtarget, RBI);
939}
940} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isStore(int Opcode)
MachineBasicBlock & MBB
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
This file declares the targeting of the RegisterBankInfo class for Mips.
static StringRef getName(Value *V)
const SmallVectorImpl< MachineOperand > & Cond
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
BinaryOperator * Mul
APInt bitcastToAPInt() const
Definition APFloat.h:1335
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition APInt.cpp:644
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool hasLocalLinkage() const
constexpr bool isScalar() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
bool hasValue() const
TypeSize getValue() const
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
LLVM_ABI unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
@ MOLoad
The memory access reads data.
LLVM_ABI Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
Register getReg() const
getReg - Returns the register number.
void setTargetFlags(unsigned F)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This class provides the information for the target register banks.
bool isFP64bit() const
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
bool isPositionIndependent() const
Value * getOperand(unsigned i) const
Definition User.h:233
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
bool hasImm(uint64_t TSFlags)
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
InstructionSelector * createMipsInstructionSelector(const MipsTargetMachine &, const MipsSubtarget &, const MipsRegisterBankInfo &)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.