//===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the VE implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "ve-instr-info"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"

// Pin the vtable to this file.
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(VESubtarget &ST)
    : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

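// Integer condition codes precede VECC::CC_AF in the VECC::CondCode
// enumeration and the floating-point codes follow it, so a single comparison
// against CC_AF is enough to classify a condition code.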
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:
    return VECC::CC_ILE;
  case VECC::CC_IL:
    return VECC::CC_IGE;
  case VECC::CC_INE:
    return VECC::CC_IEQ;
  case VECC::CC_IEQ:
    return VECC::CC_INE;
  case VECC::CC_IGE:
    return VECC::CC_IL;
  case VECC::CC_ILE:
    return VECC::CC_IG;
  case VECC::CC_AF:
    return VECC::CC_AT;
  case VECC::CC_G:
    return VECC::CC_LENAN;
  case VECC::CC_L:
    return VECC::CC_GENAN;
  case VECC::CC_NE:
    return VECC::CC_EQNAN;
  case VECC::CC_EQ:
    return VECC::CC_NENAN;
  case VECC::CC_GE:
    return VECC::CC_LNAN;
  case VECC::CC_LE:
    return VECC::CC_GNAN;
  case VECC::CC_NUM:
    return VECC::CC_NAN;
  case VECC::CC_NAN:
    return VECC::CC_NUM;
  case VECC::CC_GNAN:
    return VECC::CC_LE;
  case VECC::CC_LNAN:
    return VECC::CC_GE;
  case VECC::CC_NENAN:
    return VECC::CC_EQ;
  case VECC::CC_EQNAN:
    return VECC::CC_NE;
  case VECC::CC_GENAN:
    return VECC::CC_L;
  case VECC::CC_LENAN:
    return VECC::CC_G;
  case VECC::CC_AT:
    return VECC::CC_AF;
  case VECC::UNKNOWN:
    return VECC::UNKNOWN;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat a branch relative long always instruction as an unconditional branch.
// For example, br.l.t and br.l.
static bool isUncondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
  // VE has other branch relative always instructions for word/double/float,
  // but we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
         "Branch relative word/double/float always instructions should not be "
         "used!");
  return BRKIND(BRCFL);
#undef BRKIND
}

// Treat branch relative conditional instructions as conditional branches.
// For example, brgt.l.t and brle.s.nt.
static bool isCondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t ||               \
   Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
  return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}

// Treat branch long always instructions as indirect branches.
// For example, b.l.t and b.l.
static bool isIndirectBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
  // VE has other branch always instructions for word/double/float, but
  // we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
         "Branch word/double/float always instructions should not be used!");
  return BRKIND(BCFL);
#undef BRKIND
}

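// A VE conditional branch instruction carries its operands in the order
// (condition code, LHS, RHS, target block); parseCondBranch copies the first
// three into Cond and returns the target block through Target.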
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three components!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  unsigned opc[2];
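  // opc[0] is the immediate-compare form ("ir") and opc[1] the
  // register-register form ("rr"); which one is emitted depends on whether
  // the LHS operand below is an immediate.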
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}

unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch.

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

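// I32, I64, and F32 values are all carried in (aliases of) the 64-bit scalar
// SX registers, so copies between any of these classes can be done with a
// plain scalar OR.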
static bool IsAliasOfSX(Register Reg) {
  return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // Generate an "ORri dest, src, 0" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // Generate an "ANDM dest, vm0, src" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}

void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc, bool RenamableDest,
                              bool RenamableSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate the following instructions:
    //   %sw16 = LEA32zii 256
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    // reserving SX16 all of the time.
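    // VL is set to 256, the maximum vector length of VE, so the VOR below
    // copies the entire source register.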
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
Register VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||    // I64
      MI.getOpcode() == VE::LDLSXrii || // I32
      MI.getOpcode() == VE::LDUrii ||   // F32
      MI.getOpcode() == VE::LDQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source register along with the FrameIndex of the stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
Register VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||    // I64
      MI.getOpcode() == VE::STLrii ||   // I32
      MI.getOpcode() == VE::STUrii ||   // F32
      MI.getOpcode() == VE::STQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI,
                                      Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STQrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::STVMrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STVM512rii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}

void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register DestReg, int FI,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI,
                                       Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

bool VEInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  LLVM_DEBUG(dbgs() << "foldImmediate\n");

  LLVM_DEBUG(dbgs() << "checking DefMI\n");
  int64_t ImmVal;
  switch (DefMI.getOpcode()) {
  default:
    return false;
  case VE::ORim:
    // General move small immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking ORim\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(1).isImm());
    assert(DefMI.getOperand(2).isImm());
    ImmVal =
        DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  case VE::LEAzii:
    // General move immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking LEAzii\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(2).isImm());
    if (!DefMI.getOperand(3).isImm())
      // LEAzii may refer to a label.
      return false;
    ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  }

  // Try to fold like below:
  //   %1:i64 = ORim 0, 0(1)
  //   %2:i64 = CMPSLrr %0, %1
  // To
  //   %2:i64 = CMPSLrm %0, 0(1)
  //
  // Another example:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = CMPSLrr %1, %0
  // To
  //   %2:i64 = CMPSLir 6, %0
  //
  // Support commutable instructions like below:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = ADDSLrr %1, %0
  // To
  //   %2:i64 = ADDSLri %0, 6
  //
  // FIXME: Need to support i32.  The current implementation requires
  //   EXTRACT_SUBREG, so the input has the following COPY, which prevents
  //   folding:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i32 = COPY %1.sub_i32
  //   %3:i32 = ADDSWSXrr %0, %2
  // FIXME: Need to support shift, cmov, and more instructions.
  // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.

  LLVM_DEBUG(dbgs() << "checking UseMI\n");
  LLVM_DEBUG(UseMI.dump());
  unsigned NewUseOpcSImm7;
  unsigned NewUseOpcMImm;
  enum InstType {
    rr2ri_rm, // rr -> ri or rm, commutable
    rr2ir_rm, // rr -> ir or rm
  } InstType;

  using namespace llvm::VE;
#define INSTRKIND(NAME)                                                        \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ri;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ri_rm;                                                       \
    break
#define NCINSTRKIND(NAME)                                                      \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ir;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ir_rm;                                                       \
    break

  switch (UseMI.getOpcode()) {
  default:
    return false;

    INSTRKIND(ADDUL);
    INSTRKIND(ADDSWSX);
    INSTRKIND(ADDSWZX);
    INSTRKIND(ADDSL);
    NCINSTRKIND(SUBUL);
    NCINSTRKIND(SUBSWSX);
    NCINSTRKIND(SUBSWZX);
    NCINSTRKIND(SUBSL);
    INSTRKIND(MULUL);
    INSTRKIND(MULSWSX);
    INSTRKIND(MULSWZX);
    INSTRKIND(MULSL);
    NCINSTRKIND(DIVUL);
    NCINSTRKIND(DIVSWSX);
    NCINSTRKIND(DIVSWZX);
    NCINSTRKIND(DIVSL);
    NCINSTRKIND(CMPUL);
    NCINSTRKIND(CMPSWSX);
    NCINSTRKIND(CMPSWZX);
    NCINSTRKIND(CMPSL);
    INSTRKIND(MAXSWSX);
    INSTRKIND(MAXSWZX);
    INSTRKIND(MAXSL);
    INSTRKIND(MINSWSX);
    INSTRKIND(MINSWZX);
    INSTRKIND(MINSL);
    INSTRKIND(AND);
    INSTRKIND(OR);
    INSTRKIND(XOR);
    INSTRKIND(EQV);
    NCINSTRKIND(NND);
    NCINSTRKIND(MRG);
  }

#undef INSTRKIND

  unsigned NewUseOpc;
  unsigned UseIdx;
  bool Commute = false;
  LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
  switch (InstType) {
  case rr2ri_rm:
    UseIdx = 2;
    if (UseMI.getOperand(1).getReg() == Reg) {
      Commute = true;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
    }
    if (isInt<7>(ImmVal)) {
      // This ImmVal fits in a simm7 slot, so change UseOpc to an instruction
      // that holds a simm7 slot.
      NewUseOpc = NewUseOpcSImm7;
    } else if (isMImmVal(ImmVal)) {
      // Similarly, change UseOpc to an instruction that holds an mimm slot.
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
    } else
      return false;
    break;
  case rr2ir_rm:
    if (UseMI.getOperand(1).getReg() == Reg) {
      // Check whether the immediate value matches the UseMI instruction.
      if (!isInt<7>(ImmVal))
        return false;
      NewUseOpc = NewUseOpcSImm7;
      UseIdx = 1;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
      // Check whether the immediate value matches the UseMI instruction.
      if (!isMImmVal(ImmVal))
        return false;
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
      UseIdx = 2;
    }
    break;
  }

  LLVM_DEBUG(dbgs() << "modifying UseMI\n");
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (Commute) {
    UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
  }
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as a global base register.
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

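// A VM512 register VMPn overlaps the pair of 256-bit mask registers VM(2n)
// and VM(2n+1); getVM512Upper returns the even (upper) half and getVM512Lower
// the odd (lower) half.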
static Register getVM512Upper(Register reg) {
  return (reg - VE::VMP0) * 2 + VE::VM0;
}

static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }

// Expand pseudo logical vector instructions for VM512 registers.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}

static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}

static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
  // Replace a VFMKW pseudo with pvfmk.w.up and pvfmk.w.lo, and a VFMKS pseudo
  // with pvfmk.s.up and pvfmk.s.lo.

  static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
      {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
      {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
      {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
      {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
      {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
      {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
  };

  unsigned Opcode = MI.getOpcode();

  const auto *Found =
      llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
  if (Found == std::end(VFMKMap))
    report_fatal_error("unexpected opcode for pseudo vfmk");

  unsigned OpcodeUpper = (*Found).second.first;
  unsigned OpcodeLower = (*Found).second.second;

  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
  addOperandsForVFMK(Bu, MI, /* Upper */ true);
  MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
  addOperandsForVFMK(Bl, MI, /* Upper */ false);

  MI.eraseFromParent();
}

bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }

  case VE::ANDMyy:
    expandPseudoLogM(MI, get(VE::ANDMmm));
    return true;
  case VE::ORMyy:
    expandPseudoLogM(MI, get(VE::ORMmm));
    return true;
  case VE::XORMyy:
    expandPseudoLogM(MI, get(VE::XORMmm));
    return true;
  case VE::EQVMyy:
    expandPseudoLogM(MI, get(VE::EQVMmm));
    return true;
  case VE::NNDMyy:
    expandPseudoLogM(MI, get(VE::NNDMmm));
    return true;
  case VE::NEGMy:
    expandPseudoLogM(MI, get(VE::NEGMm));
    return true;

  case VE::LVMyir:
  case VE::LVMyim:
  case VE::LVMyir_y:
  case VE::LVMyim_y: {
    Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
    Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
    int64_t Imm = MI.getOperand(1).getImm();
    bool IsSrcReg =
        MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
    Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
    int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
    bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
    Register VMX = VMXl;
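    // LVM writes one 64-bit element (index 0-3) of a 256-bit mask register.
    // For a VM512 pair, element indices 0-3 address the lower half and 4-7 the
    // upper half, so select the half and rebase the index accordingly.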
    if (Imm >= 4) {
      VMX = VMXu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    switch (MI.getOpcode()) {
    case VE::LVMyir:
      BuildMI(*MBB, MI, DL, get(VE::LVMir))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc));
      break;
    case VE::LVMyim:
      BuildMI(*MBB, MI, DL, get(VE::LVMim))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm);
      break;
    case VE::LVMyir_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyir_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc))
          .addReg(VMX);
      break;
    case VE::LVMyim_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyim_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm)
          .addReg(VMX);
      break;
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::SVMyi: {
    Register Dest = MI.getOperand(0).getReg();
    Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
    bool KillSrc = MI.getOperand(1).isKill();
    int64_t Imm = MI.getOperand(2).getImm();
    Register VMZ = VMZl;
    if (Imm >= 4) {
      VMZ = VMZu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
    MachineInstr *Inst = MIB.getInstr();
    if (KillSrc) {
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::VFMKyal:
  case VE::VFMKynal:
  case VE::VFMKWyvl:
  case VE::VFMKWyvyl:
  case VE::VFMKSyvl:
  case VE::VFMKSyvyl:
    expandPseudoVFMK(*this, MI);
    return true;
  }
  return false;
}

bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create the following instructions and multiple basic blocks.
  //
  // thisBB:
  //   brge.l.t %sp, %sl, sinkBB
  // syscallBB:
  //   ld      %s61, 0x18(, %tp)   // load param area
  //   or      %s62, 0, %s0        // spill the value of %s0
  //   lea     %s63, 0x13b         // syscall # of grow
  //   shm.l   %s63, 0x0(%s61)     // store syscall # at addr:0
  //   shm.l   %sl, 0x8(%s61)      // store old limit at addr:8
  //   shm.l   %sp, 0x10(%s61)     // store new limit at addr:16
  //   monc                        // call monitor
  //   or      %s0, 0, %s62        // restore the value of %s0
  // sinkBB:

  // Create the new MBBs.
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create the following instruction:
  //
  //   dst = %sp + target-specific frame + the size of the parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved area at the top of the stack as described
  // in VEFrameLowering.cpp, so we adjust for it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also add the size of the parameter area.
  if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
      .addDef(MI.getOperand(0).getReg())
      .addReg(VE::SX11)
      .addImm(0)
      .addImm(NumBytes);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}