LLVM 20.0.0git
SystemZInstrInfo.cpp
Go to the documentation of this file.
1//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the SystemZ implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SystemZInstrInfo.h"
15#include "SystemZ.h"
16#include "SystemZInstrBuilder.h"
17#include "SystemZSubtarget.h"
18#include "llvm/ADT/Statistic.h"
35#include "llvm/MC/MCInstrDesc.h"
41#include <cassert>
42#include <cstdint>
43#include <iterator>
44
45using namespace llvm;
46
47#define GET_INSTRINFO_CTOR_DTOR
48#define GET_INSTRMAP_INFO
49#include "SystemZGenInstrInfo.inc"
50
51#define DEBUG_TYPE "systemz-II"
52
// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  // Shift in two steps so that Count == 64 is well-defined (a single
  // 64-bit shift by 64 would be UB).
  if (Count == 0)
    return 0;
  uint64_t HighBit = uint64_t(1) << (Count - 1);
  return (HighBit << 1) - 1;
}
57
// Pin the vtable to this file.  An out-of-line virtual method definition
// makes this translation unit the "key function", so the vtable is emitted
// only here instead of in every TU that includes the header.
void SystemZInstrInfo::anchor() {}
60
62 : SystemZGenInstrInfo(-1, -1),
63 RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
64 STI(sti) {}
65
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  // NOTE(review): the declaration of `MF` (presumably
  // `MachineFunction &MF = *MBB->getParent();`) was dropped from this view;
  // `MF` is used below — confirm against the original file.

  // Get two load or store instructions. Use the original instruction for
  // one of them and create a clone for the other.
  MachineInstr *HighPartMI = MF.CloneMachineInstr(&*MI);
  MachineInstr *LowPartMI = &*MI;
  MBB->insert(LowPartMI, HighPartMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = HighPartMI->getOperand(0);
  MachineOperand &LowRegOp = LowPartMI->getOperand(0);
  Register Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = HighPartMI->getOperand(2);
  MachineOperand &LowOffsetOp = LowPartMI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes: getOpcodeForOffset picks the displacement-appropriate
  // form for each half.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");
  HighPartMI->setDesc(get(HighOpcode));
  LowPartMI->setDesc(get(LowOpcode));

  MachineInstr *FirstMI = HighPartMI;
  if (MI->mayStore()) {
    FirstMI->getOperand(0).setIsKill(false);
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, HighPartMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, LowPartMI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
  } else {
    // If HighPartMI clobbers any of the address registers, it needs to come
    // after LowPartMI.
    auto overlapsAddressReg = [&](Register Reg) -> bool {
      return RI.regsOverlap(Reg, MI->getOperand(1).getReg()) ||
             RI.regsOverlap(Reg, MI->getOperand(3).getReg());
    };
    if (overlapsAddressReg(HighRegOp.getReg())) {
      assert(!overlapsAddressReg(LowRegOp.getReg()) &&
             "Both loads clobber address!");
      MBB->splice(HighPartMI, MBB, LowPartMI);
      FirstMI = LowPartMI;
    }
  }

  // Clear the kill flags on the address registers in the first instruction,
  // since the second instruction still reads them.
  FirstMI->getOperand(1).setIsKill(false);
  FirstMI->getOperand(3).setIsKill(false);
}
130
// Split ADJDYNALLOC instruction MI: fold all frame-related components into
// a single LA/LAY-style address computation.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);
  // NOTE(review): the declaration of `Regs` (presumably obtained via
  // `STI.getSpecialRegisters()`) was dropped from this view; `Regs` is used
  // below — confirm against the original file.

  // Total displacement = outgoing call area + ABI call frame size +
  // stack pointer bias + the pseudo's original immediate.
  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     Regs->getCallFrameSize() +
                     Regs->getStackPointerBias() +
                     OffsetMO.getImm());
  // Pick the LA variant whose displacement range fits the final offset.
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}
148
149// MI is an RI-style pseudo instruction. Replace it with LowOpcode
150// if the first operand is a low GR32 and HighOpcode if the first operand
151// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
152// and HighOpcode takes an unsigned 32-bit operand. In those cases,
153// MI has the same kind of operand as LowOpcode, so needs to be converted
154// if HighOpcode is used.
155void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
156 unsigned HighOpcode,
157 bool ConvertHigh) const {
158 Register Reg = MI.getOperand(0).getReg();
159 bool IsHigh = SystemZ::isHighReg(Reg);
160 MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
161 if (IsHigh && ConvertHigh)
162 MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
163}
164
165// MI is a three-operand RIE-style pseudo instruction. Replace it with
166// LowOpcodeK if the registers are both low GR32s, otherwise use a move
167// followed by HighOpcode or LowOpcode, depending on whether the target
168// is a high or low GR32.
169void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
170 unsigned LowOpcodeK,
171 unsigned HighOpcode) const {
172 Register DestReg = MI.getOperand(0).getReg();
173 Register SrcReg = MI.getOperand(1).getReg();
174 bool DestIsHigh = SystemZ::isHighReg(DestReg);
175 bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
176 if (!DestIsHigh && !SrcIsHigh)
177 MI.setDesc(get(LowOpcodeK));
178 else {
179 if (DestReg != SrcReg) {
180 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
181 SystemZ::LR, 32, MI.getOperand(1).isKill(),
182 MI.getOperand(1).isUndef());
183 MI.getOperand(1).setReg(DestReg);
184 }
185 MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
186 MI.tieOperands(0, 1);
187 }
188}
189
190// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
191// if the first operand is a low GR32 and HighOpcode if the first operand
192// is a high GR32.
193void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
194 unsigned HighOpcode) const {
195 Register Reg = MI.getOperand(0).getReg();
196 unsigned Opcode = getOpcodeForOffset(
197 SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
198 MI.getOperand(2).getImm());
199 MI.setDesc(get(Opcode));
200}
201
202// MI is a load-on-condition pseudo instruction with a single register
203// (source or destination) operand. Replace it with LowOpcode if the
204// register is a low GR32 and HighOpcode if the register is a high GR32.
205void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
206 unsigned HighOpcode) const {
207 Register Reg = MI.getOperand(0).getReg();
208 unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
209 MI.setDesc(get(Opcode));
210}
211
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  // NOTE(review): the line capturing the builder (presumably
  // `MachineInstrBuilder MIB =`) was dropped from this view; `MIB` is used
  // below — confirm against the original file.
  emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
                Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is.
  for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
    MIB.add(MO);

  // The pseudo has been fully replaced by the emitted move.
  MI.eraseFromParent();
}
228
// Expand the LOAD_STACK_GUARD pseudo: build the thread pointer from access
// registers %a0/%a1 into Reg64, then reuse MI as a 64-bit load from offset
// 40 off that pointer.
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const Register Reg64 = MI->getOperand(0).getReg();
  const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister, so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
    .addReg(SystemZ::A0)
  // NOTE(review): the statement-terminating line for this BuildMI (content
  // line 241) was dropped from this view.

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
    .addReg(Reg64)
    .addReg(0)
    .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
    .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  // MI itself becomes the final load instead of creating a new instruction.
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}
257
// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
// NOTE(review): the return-type line (presumably `MachineInstrBuilder`) and
// the `MachineBasicBlock::iterator MBBI,` parameter line were dropped from
// this view — confirm against the original file.
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  unsigned Opcode;
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  // Select the RISB variant from the high/low banks of the two registers.
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    // Both registers are low: a plain LR/LLCR/LLHR-style move suffices.
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
  }
  // Rotate by 32 whenever the move crosses between the high and low banks.
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  // DestReg is also read as the first operand but its prior value is not
  // needed, hence RegState::Undef.
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
289
// Commute operands OpIdx1 and OpIdx2 of MI (returning a clone when NewMI is
// set). For select/load-on-condition opcodes the condition mask is inverted
// as well, since swapping the value operands flips which one is selected.
// NOTE(review): the signature line (presumably
// `MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,`)
// was dropped from this view — confirm against the original file.
                                                   bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  // Work on a clone when the caller asked for a new instruction.
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::SELRMux:
  case SystemZ::SELFHR:
  case SystemZ::SELR:
  case SystemZ::SELGR:
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition: XOR-ing the mask with CCValid flips exactly the
    // valid condition-code bits.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}
321
322// If MI is a simple load or store for a frame object, return the register
323// it loads or stores and set FrameIndex to the index of the frame object.
324// Return 0 otherwise.
325//
326// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
327static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
328 unsigned Flag) {
329 const MCInstrDesc &MCID = MI.getDesc();
330 if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
331 MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
332 FrameIndex = MI.getOperand(1).getIndex();
333 return MI.getOperand(0).getReg();
334 }
335 return 0;
336}
337
// Return the register loaded if MI is a simple load from a stack slot,
// setting FrameIndex; return 0 otherwise (TargetInstrInfo hook).
// NOTE(review): the signature line of this override was dropped from this
// view — confirm against the original file.
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}
342
// Return the register stored if MI is a simple store to a stack slot,
// setting FrameIndex; return 0 otherwise (TargetInstrInfo hook).
// NOTE(review): the signature line of this override was dropped from this
// view — confirm against the original file.
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
347
// Return true if MI is an MVC copying the entire contents of one stack slot
// to another, setting DestFrameIndex and SrcFrameIndex.
// NOTE(review): the signature line (presumably
// `bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,`) was
// dropped from this view — confirm against the original file.
                                        int &DestFrameIndex,
                                        int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}
370
// Analyze the terminators of MBB (TargetInstrInfo hook), filling TBB/FBB
// and the (CCValid, CCMask) condition in Cond. Returns true when the
// control flow cannot be understood.
// NOTE(review): the leading signature lines (including the
// `SmallVectorImpl<MachineOperand> &Cond` parameter) and the declarations
// of iterator `I` (presumably `MBB.end()`) and `Branch` (presumably
// `SystemZII::Branch Branch(getBranchInfo(*I));`) were dropped from this
// view — confirm against the original file.
                                     MachineBasicBlock *&FBB,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    if (!Branch.hasMBBTarget())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.getMBBTarget();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      MBB.erase(std::next(I), MBB.end());

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.getMBBTarget();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.getMBBTarget();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.getMBBTarget())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}
461
// Remove the branch instructions at the end of MBB and return how many were
// removed (TargetInstrInfo hook).
// NOTE(review): the signature line (presumably
// `unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,`) and
// the declaration of iterator `I` (presumably `MBB.end()`) were dropped
// from this view — confirm against the original file.
                                         int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  unsigned Count = 0;

  // Walk upward from the end, erasing direct branches until a non-branch
  // (or a branch with no MBB target) is found.
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(*I).hasMBBTarget())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}
486
// Invert the sense of a (CCValid, CCMask) branch condition in place
// (TargetInstrInfo hook); returns false to signal success.
// NOTE(review): the signature lines of this `reverseBranchCondition`
// override were dropped from this view — confirm against the original file.
  assert(Cond.size() == 2 && "Invalid condition");
  // XOR-ing the mask with the valid bits flips exactly the meaningful
  // condition-code values.
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}
493
// Insert branch code at the end of MBB (TargetInstrInfo hook): J for an
// unconditional branch, BRC for a conditional one, plus an optional second
// J to FBB. Returns the number of instructions inserted.
// NOTE(review): the leading signature lines of this `insertBranch` override
// were dropped from this view — confirm against the original file.
                                         const DebugLoc &DL,
                                         int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}
532
// Analyze a comparison instruction (TargetInstrInfo hook). Only the
// register-immediate form is recognized: fill SrcReg/Value/Mask and return
// true for it, false for everything else.
// NOTE(review): the signature line (presumably
// `bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI,
// Register &SrcReg,`) was dropped from this view — confirm against the
// original file.
                                       Register &SrcReg2, int64_t &Mask,
                                       int64_t &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    // Register-immediate compare: the full register is compared, so the
    // mask is all ones.
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}
549
// Return true if a select of TrueReg/FalseReg under Pred can be lowered to
// load-on-condition instructions, reporting rough cycle counts
// (TargetInstrInfo hook).
// NOTE(review): the leading signature lines and the declaration of `MRI`
// (presumably the function's MachineRegisterInfo) were dropped from this
// view — confirm against the original file.
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes.
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  // GRX32 additionally requires the load/store-on-condition-2 facility.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}
583
// Materialize a select of TrueReg/FalseReg into DstReg under condition Pred
// using the SELR/LOCR family (TargetInstrInfo hook).
// NOTE(review): the leading signature lines and the declaration of `MRI`
// were dropped from this view — confirm against the original file.
                                    const DebugLoc &DL, Register DstReg,
                                    Register TrueReg,
                                    Register FalseReg) const {
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELRMux;
    else if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      // Plain LOCR only works on low GR32s: constrain the destination and
      // copy both inputs into fresh GR32 vregs first.
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELGR;
    else
      Opc = SystemZ::LOCGR;
  } else
    llvm_unreachable("Invalid register class");

  // Note the operand order: FalseReg first, then TrueReg.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
    .addReg(FalseReg).addReg(TrueReg)
    .addImm(CCValid).addImm(CCMask);
}
625
// Try to fold the load that defines FoldAsLoadDefReg into MI, returning the
// folded instruction on success (TargetInstrInfo hook). On success
// FoldAsLoadDefReg is cleared to mark the def as consumed.
// NOTE(review): the leading signature lines of this `optimizeLoadInstr`
// override were dropped from this view — confirm against the original file.
                                             Register &FoldAsLoadDefReg,
                                             MachineInstr *&DefMI) const {
  // Check whether we can move the DefMI load, and that it only has one use.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(SawStore) || !MRI->hasOneNonDBGUse(FoldAsLoadDefReg))
    return nullptr;

  int UseOpIdx =
    MI.findRegisterUseOperandIdx(FoldAsLoadDefReg, /*TRI=*/nullptr);
  assert(UseOpIdx != -1 && "Expected FoldAsLoadDefReg to be used by MI.");

  // Check whether we can fold the load.
  if (MachineInstr *FoldMI =
      foldMemoryOperand(MI, {((unsigned)UseOpIdx)}, *DefMI)) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}
650
// Try to fold the immediate defined by DefMI (into Reg) directly into UseMI
// (TargetInstrInfo hook). Handles a VGBM-zero feeding a GR128 COPY, and
// LHI-family immediates feeding select/load-on-condition instructions.
// NOTE(review): the signature line (presumably
// `bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI,
// MachineInstr &DefMI,`) was dropped from this view, as was the builder
// line (presumably `MachineInstrBuilder(MBB.getParent(), &UseMI)`) feeding
// the REG_SEQUENCE operands below — confirm against the original file.
                                      Register Reg,
                                      MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();

  if (DefOpc == SystemZ::VGBM) {
    int64_t ImmVal = DefMI.getOperand(1).getImm();
    if (ImmVal != 0) // TODO: Handle other values
      return false;

    // Fold gr128 = COPY (vr128 VGBM imm)
    //
    // %tmp:gr64 = LGHI 0
    // to gr128 = REG_SEQUENCE %tmp, %tmp
    assert(DefMI.getOperand(0).getReg() == Reg);

    if (!UseMI.isCopy())
      return false;

    Register CopyDstReg = UseMI.getOperand(0).getReg();
    if (CopyDstReg.isVirtual() &&
        MRI->getRegClass(CopyDstReg) == &SystemZ::GR128BitRegClass &&
        MRI->hasOneNonDBGUse(Reg)) {
      // TODO: Handle physical registers
      // TODO: Handle gr64 uses with subregister indexes
      // TODO: Should this handle multi-use cases?
      Register TmpReg = MRI->createVirtualRegister(&SystemZ::GR64BitRegClass);
      MachineBasicBlock &MBB = *UseMI.getParent();

      loadImmediate(MBB, UseMI.getIterator(), TmpReg, ImmVal);

      // Rewrite the COPY into a REG_SEQUENCE assembling both 64-bit halves
      // from the same zero register.
      UseMI.setDesc(get(SystemZ::REG_SEQUENCE));
      UseMI.getOperand(1).setReg(TmpReg);
        .addImm(SystemZ::subreg_h64)
        .addReg(TmpReg)
        .addImm(SystemZ::subreg_l64);

      if (MRI->use_nodbg_empty(Reg))
        DefMI.eraseFromParent();
      return true;
    }

    return false;
  }

  // Only small signed immediates loaded by the LHI family can be folded.
  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  bool TieOps = false;
  switch (UseOpc) {
  case SystemZ::SELRMux:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::SELGR:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  // If the immediate is in the "wrong" slot, commute first so it ends up in
  // operand 2.
  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (TieOps)
    UseMI.tieOperands(0, 1);
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
756
// Return true for the opcodes this target knows how to predicate: returns,
// traps and the JG/BR call forms (see PredicateInstruction below).
// NOTE(review): the signature line (presumably
// `bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {`)
// was dropped from this view — confirm against the original file.
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Return_XPLINK ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}
767
// Decide whether if-converting a single block MBB is profitable
// (TargetInstrInfo hook).
// NOTE(review): the leading signature lines of this `isProfitableToIfCvt`
// override were dropped from this view — confirm against the original file.
                   unsigned NumCycles, unsigned ExtraPredCycles,
                   BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer). This doesn't apply for low-probability
  // loops (eg. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}
786
// Decide whether if-converting a diamond (both TMBB and FMBB) is
// profitable (TargetInstrInfo hook) — currently never.
// NOTE(review): the leading signature lines of this `isProfitableToIfCvt`
// override were dropped from this view — confirm against the original file.
                   unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                   MachineBasicBlock &FMBB,
                   unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                   BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}
796
// Decide whether duplicating MBB's instructions for if-conversion pays off
// (TargetInstrInfo hook).
// NOTE(review): the leading signature lines of this override were dropped
// from this view — confirm against the original file.
                   BranchProbability Probability) const {
  // For now only duplicate single instructions.
  return NumCycles == 1;
}
803
// Predicate MI in place under condition Pred (TargetInstrInfo hook),
// rewriting Trap/Return/CallJG/CallBR into their conditional forms.
// Returns true on success, false for unhandled opcodes.
// NOTE(review): the leading signature lines of this `PredicateInstruction`
// override were dropped from this view — confirm against the original file.
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
    MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
                                             : SystemZ::CondReturn_XPLINK));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid)
      .addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    // Rebuild the direct call as conditional BRCL, preserving the callee
    // operand and register mask.
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid)
      .addImm(CCMask)
      .add(FirstOp)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    // Rebuild the indirect call as conditional BCR.
    MachineOperand Target = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .add(Target)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}
856
// Copy SrcReg to DestReg (TargetInstrInfo hook), choosing a single opcode
// or a multi-instruction sequence based on the register classes involved.
// NOTE(review): the leading signature lines of this `copyPhysReg` override
// were dropped from this view — confirm against the original file.
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    MCRegister SrcRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister SrcRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    // Merge the two halves into the destination vector register.
    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
      .addReg(SrcRegHi, getKillRegState(KillSrc))
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    MCRegister DestRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister DestRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
    // Replicate element 1 of the source vector into the low half.
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
      .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::GR128BitRegClass.contains(SrcReg)) {
    MCRegister DestRegHi = RI.getSubReg(DestReg, SystemZ::subreg_h64);
    MCRegister DestRegLo = RI.getSubReg(DestReg, SystemZ::subreg_l64);
    MCRegister SrcRegHi = RI.getSubReg(SrcReg, SystemZ::subreg_h64);
    MCRegister SrcRegLo = RI.getSubReg(SrcReg, SystemZ::subreg_l64);

    BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegHi)
      .addReg(SrcRegHi)
    // NOTE(review): the line terminating this BuildMI (content line 922) was
    // dropped from this view — confirm against the original file.

    BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegLo)
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }

  // Move CC value from a GR32.
  if (DestReg == SystemZ::CC) {
    unsigned Opcode =
      SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
    BuildMI(MBB, MBBI, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  if (SystemZ::GR128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    MCRegister DestH64 = RI.getSubReg(DestReg, SystemZ::subreg_h64);
    MCRegister DestL64 = RI.getSubReg(DestReg, SystemZ::subreg_l64);

    // Extract each 64-bit element of the vector into a GPR half.
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestH64)
      .addReg(SrcReg)
      .addReg(SystemZ::NoRegister)
      .addImm(0)
      .addDef(DestReg, RegState::Implicit);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestL64)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addReg(SystemZ::NoRegister)
      .addImm(1);
    return;
  }

  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::GR128BitRegClass.contains(SrcReg)) {
    // Pack the two GPR halves into a vector register.
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLVGP), DestReg)
      .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64))
      .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}
989
992 bool isKill, int FrameIdx, const TargetRegisterClass *RC,
993 const TargetRegisterInfo *TRI, Register VReg) const {
994 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
995
996 // Callers may expect a single instruction, so keep 128-bit moves
997 // together for now and lower them after register allocation.
998 unsigned LoadOpcode, StoreOpcode;
999 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
1000 addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
1001 .addReg(SrcReg, getKillRegState(isKill)),
1002 FrameIdx);
1003}
1004
1007 Register DestReg, int FrameIdx,
1008 const TargetRegisterClass *RC,
1009 const TargetRegisterInfo *TRI,
1010 Register VReg) const {
1011 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1012
1013 // Callers may expect a single instruction, so keep 128-bit moves
1014 // together for now and lower them after register allocation.
1015 unsigned LoadOpcode, StoreOpcode;
1016 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
1017 addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
1018 FrameIdx);
1019}
1020
1021// Return true if MI is a simple load or store with a 12-bit displacement
1022// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
1023static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
1024 const MCInstrDesc &MCID = MI->getDesc();
1025 return ((MCID.TSFlags & Flag) &&
1026 isUInt<12>(MI->getOperand(2).getImm()) &&
1027 MI->getOperand(3).getReg() == 0);
1028}
1029
namespace {

// Describes an AND-with-immediate operation: the width of the register it
// operates on and the position and width of the immediate field within it.
struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  // A default-constructed LogicOp (RegSize == 0) means "not an AND
  // immediate"; any real operation converts to true.
  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0; // Register width in bits.
  unsigned ImmLSB = 0;  // Bit index of the immediate field's LSB.
  unsigned ImmSize = 0; // Width of the immediate field in bits.
};

} // end anonymous namespace
1045
// Map an AND-with-immediate opcode onto a description of the bitfield it
// operates on (register width, immediate LSB position, immediate width).
// Returns a null LogicOp for any opcode that is not an AND immediate.
static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32, 0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64, 0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32, 0, 32);
  case SystemZ::NILF64: return LogicOp(64, 0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default: return LogicOp();
  }
}
1060
1061static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
1062 if (OldMI->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr)) {
1063 MachineOperand *CCDef =
1064 NewMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr);
1065 if (CCDef != nullptr)
1066 CCDef->setIsDead(true);
1067 }
1068}
1069
// Copy the machine-instruction flag Flag from OldMI to NewMI if it is set
// on OldMI; flags that are clear on OldMI are left untouched on NewMI.
static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
                           MachineInstr::MIFlag Flag) {
  if (OldMI->getFlag(Flag))
    NewMI->setFlag(Flag);
}
1075
1078 LiveIntervals *LIS) const {
1079 MachineBasicBlock *MBB = MI.getParent();
1080
1081 // Try to convert an AND into an RISBG-type instruction.
1082 // TODO: It might be beneficial to select RISBG and shorten to AND instead.
1083 if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
1084 uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
1085 // AND IMMEDIATE leaves the other bits of the register unchanged.
1086 Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
1087 unsigned Start, End;
1088 if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
1089 unsigned NewOpcode;
1090 if (And.RegSize == 64) {
1091 NewOpcode = SystemZ::RISBG;
1092 // Prefer RISBGN if available, since it does not clobber CC.
1093 if (STI.hasMiscellaneousExtensions())
1094 NewOpcode = SystemZ::RISBGN;
1095 } else {
1096 NewOpcode = SystemZ::RISBMux;
1097 Start &= 31;
1098 End &= 31;
1099 }
1100 MachineOperand &Dest = MI.getOperand(0);
1101 MachineOperand &Src = MI.getOperand(1);
1103 BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
1104 .add(Dest)
1105 .addReg(0)
1106 .addReg(Src.getReg(), getKillRegState(Src.isKill()),
1107 Src.getSubReg())
1108 .addImm(Start)
1109 .addImm(End + 128)
1110 .addImm(0);
1111 if (LV) {
1112 unsigned NumOps = MI.getNumOperands();
1113 for (unsigned I = 1; I < NumOps; ++I) {
1114 MachineOperand &Op = MI.getOperand(I);
1115 if (Op.isReg() && Op.isKill())
1116 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1117 }
1118 }
1119 if (LIS)
1120 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1121 transferDeadCC(&MI, MIB);
1122 return MIB;
1123 }
1124 }
1125 return nullptr;
1126}
1127
1129 bool Invert) const {
1130 unsigned Opc = Inst.getOpcode();
1131 if (Invert) {
1132 auto InverseOpcode = getInverseOpcode(Opc);
1133 if (!InverseOpcode)
1134 return false;
1135 Opc = *InverseOpcode;
1136 }
1137
1138 switch (Opc) {
1139 default:
1140 break;
1141 // Adds and multiplications.
1142 case SystemZ::WFADB:
1143 case SystemZ::WFASB:
1144 case SystemZ::WFAXB:
1145 case SystemZ::VFADB:
1146 case SystemZ::VFASB:
1147 case SystemZ::WFMDB:
1148 case SystemZ::WFMSB:
1149 case SystemZ::WFMXB:
1150 case SystemZ::VFMDB:
1151 case SystemZ::VFMSB:
1154 }
1155
1156 return false;
1157}
1158
1159std::optional<unsigned>
1161 // fadd => fsub
1162 switch (Opcode) {
1163 case SystemZ::WFADB:
1164 return SystemZ::WFSDB;
1165 case SystemZ::WFASB:
1166 return SystemZ::WFSSB;
1167 case SystemZ::WFAXB:
1168 return SystemZ::WFSXB;
1169 case SystemZ::VFADB:
1170 return SystemZ::VFSDB;
1171 case SystemZ::VFASB:
1172 return SystemZ::VFSSB;
1173 // fsub => fadd
1174 case SystemZ::WFSDB:
1175 return SystemZ::WFADB;
1176 case SystemZ::WFSSB:
1177 return SystemZ::WFASB;
1178 case SystemZ::WFSXB:
1179 return SystemZ::WFAXB;
1180 case SystemZ::VFSDB:
1181 return SystemZ::VFADB;
1182 case SystemZ::VFSSB:
1183 return SystemZ::VFASB;
1184 default:
1185 return std::nullopt;
1186 }
1187}
1188
1191 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1192 LiveIntervals *LIS, VirtRegMap *VRM) const {
1195 const MachineFrameInfo &MFI = MF.getFrameInfo();
1196 unsigned Size = MFI.getObjectSize(FrameIndex);
1197 unsigned Opcode = MI.getOpcode();
1198
1199 // Check CC liveness if new instruction introduces a dead def of CC.
1200 SlotIndex MISlot = SlotIndex();
1201 LiveRange *CCLiveRange = nullptr;
1202 bool CCLiveAtMI = true;
1203 if (LIS) {
1204 MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1205 auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
1206 assert(range_size(CCUnits) == 1 && "CC only has one reg unit.");
1207 CCLiveRange = &LIS->getRegUnit(*CCUnits.begin());
1208 CCLiveAtMI = CCLiveRange->liveAt(MISlot);
1209 }
1210
1211 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1212 if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1213 isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1214 // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1215 MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1216 MI.getDebugLoc(), get(SystemZ::AGSI))
1217 .addFrameIndex(FrameIndex)
1218 .addImm(0)
1219 .addImm(MI.getOperand(2).getImm());
1220 BuiltMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr)
1221 ->setIsDead(true);
1222 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1223 return BuiltMI;
1224 }
1225 return nullptr;
1226 }
1227
1228 // All other cases require a single operand.
1229 if (Ops.size() != 1)
1230 return nullptr;
1231
1232 unsigned OpNum = Ops[0];
1233 assert(Size * 8 ==
1234 TRI->getRegSizeInBits(*MF.getRegInfo()
1235 .getRegClass(MI.getOperand(OpNum).getReg())) &&
1236 "Invalid size combination");
1237
1238 if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1239 isInt<8>(MI.getOperand(2).getImm())) {
1240 // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1241 Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1242 MachineInstr *BuiltMI =
1243 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1244 .addFrameIndex(FrameIndex)
1245 .addImm(0)
1246 .addImm(MI.getOperand(2).getImm());
1247 transferDeadCC(&MI, BuiltMI);
1249 return BuiltMI;
1250 }
1251
1252 if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1253 isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1254 (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1255 isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1256 // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1257 Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1258 MachineInstr *BuiltMI =
1259 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1260 .addFrameIndex(FrameIndex)
1261 .addImm(0)
1262 .addImm((int8_t)MI.getOperand(2).getImm());
1263 transferDeadCC(&MI, BuiltMI);
1264 return BuiltMI;
1265 }
1266
1267 if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1268 isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1269 (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1270 isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1271 // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1272 Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1273 MachineInstr *BuiltMI =
1274 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1275 .addFrameIndex(FrameIndex)
1276 .addImm(0)
1277 .addImm((int8_t)-MI.getOperand(2).getImm());
1278 transferDeadCC(&MI, BuiltMI);
1279 return BuiltMI;
1280 }
1281
1282 unsigned MemImmOpc = 0;
1283 switch (Opcode) {
1284 case SystemZ::LHIMux:
1285 case SystemZ::LHI: MemImmOpc = SystemZ::MVHI; break;
1286 case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break;
1287 case SystemZ::CHIMux:
1288 case SystemZ::CHI: MemImmOpc = SystemZ::CHSI; break;
1289 case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break;
1290 case SystemZ::CLFIMux:
1291 case SystemZ::CLFI:
1292 if (isUInt<16>(MI.getOperand(1).getImm()))
1293 MemImmOpc = SystemZ::CLFHSI;
1294 break;
1295 case SystemZ::CLGFI:
1296 if (isUInt<16>(MI.getOperand(1).getImm()))
1297 MemImmOpc = SystemZ::CLGHSI;
1298 break;
1299 default: break;
1300 }
1301 if (MemImmOpc)
1302 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1303 get(MemImmOpc))
1304 .addFrameIndex(FrameIndex)
1305 .addImm(0)
1306 .addImm(MI.getOperand(1).getImm());
1307
1308 if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1309 bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1310 bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1311 // If we're spilling the destination of an LDGR or LGDR, store the
1312 // source register instead.
1313 if (OpNum == 0) {
1314 unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1315 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1316 get(StoreOpcode))
1317 .add(MI.getOperand(1))
1318 .addFrameIndex(FrameIndex)
1319 .addImm(0)
1320 .addReg(0);
1321 }
1322 // If we're spilling the source of an LDGR or LGDR, load the
1323 // destination register instead.
1324 if (OpNum == 1) {
1325 unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1326 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1327 get(LoadOpcode))
1328 .add(MI.getOperand(0))
1329 .addFrameIndex(FrameIndex)
1330 .addImm(0)
1331 .addReg(0);
1332 }
1333 }
1334
1335 // Look for cases where the source of a simple store or the destination
1336 // of a simple load is being spilled. Try to use MVC instead.
1337 //
1338 // Although MVC is in practice a fast choice in these cases, it is still
1339 // logically a bytewise copy. This means that we cannot use it if the
1340 // load or store is volatile. We also wouldn't be able to use MVC if
1341 // the two memories partially overlap, but that case cannot occur here,
1342 // because we know that one of the memories is a full frame index.
1343 //
1344 // For performance reasons, we also want to avoid using MVC if the addresses
1345 // might be equal. We don't worry about that case here, because spill slot
1346 // coloring happens later, and because we have special code to remove
1347 // MVCs that turn out to be redundant.
1348 if (OpNum == 0 && MI.hasOneMemOperand()) {
1349 MachineMemOperand *MMO = *MI.memoperands_begin();
1350 if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1351 // Handle conversion of loads.
1353 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1354 get(SystemZ::MVC))
1355 .addFrameIndex(FrameIndex)
1356 .addImm(0)
1357 .addImm(Size)
1358 .add(MI.getOperand(1))
1359 .addImm(MI.getOperand(2).getImm())
1360 .addMemOperand(MMO);
1361 }
1362 // Handle conversion of stores.
1364 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1365 get(SystemZ::MVC))
1366 .add(MI.getOperand(1))
1367 .addImm(MI.getOperand(2).getImm())
1368 .addImm(Size)
1369 .addFrameIndex(FrameIndex)
1370 .addImm(0)
1371 .addMemOperand(MMO);
1372 }
1373 }
1374 }
1375
1376 // If the spilled operand is the final one or the instruction is
1377 // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of
1378 // CC if it is live and MI does not define it.
1379 unsigned NumOps = MI.getNumExplicitOperands();
1380 int MemOpcode = SystemZ::getMemOpcode(Opcode);
1381 if (MemOpcode == -1 ||
1382 (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1383 get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
1384 return nullptr;
1385
1386 // Check if all other vregs have a usable allocation in the case of vector
1387 // to FP conversion.
1388 const MCInstrDesc &MCID = MI.getDesc();
1389 for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
1390 const MCOperandInfo &MCOI = MCID.operands()[I];
1391 if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
1392 continue;
1393 const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
1394 if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
1395 Register Reg = MI.getOperand(I).getReg();
1396 Register PhysReg = Reg.isVirtual()
1397 ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
1398 : Reg;
1399 if (!PhysReg ||
1400 !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
1401 SystemZ::FP64BitRegClass.contains(PhysReg) ||
1402 SystemZ::VF128BitRegClass.contains(PhysReg)))
1403 return nullptr;
1404 }
1405 }
1406 // Fused multiply and add/sub need to have the same dst and accumulator reg.
1407 bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
1408 Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
1409 if (FusedFPOp) {
1410 Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
1411 Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
1412 if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
1413 return nullptr;
1414 }
1415
1416 // Try to swap compare operands if possible.
1417 bool NeedsCommute = false;
1418 if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
1419 MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
1420 MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
1421 MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
1422 OpNum == 0 && prepareCompareSwapOperands(MI))
1423 NeedsCommute = true;
1424
1425 bool CCOperands = false;
1426 if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
1427 MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
1428 assert(MI.getNumOperands() == 6 && NumOps == 5 &&
1429 "LOCR/SELR instruction operands corrupt?");
1430 NumOps -= 2;
1431 CCOperands = true;
1432 }
1433
1434 // See if this is a 3-address instruction that is convertible to 2-address
1435 // and suitable for folding below. Only try this with virtual registers
1436 // and a provided VRM (during regalloc).
1437 if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
1438 if (VRM == nullptr)
1439 return nullptr;
1440 else {
1441 Register DstReg = MI.getOperand(0).getReg();
1442 Register DstPhys =
1443 (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
1444 Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1445 : ((OpNum == 1 && MI.isCommutable())
1446 ? MI.getOperand(2).getReg()
1447 : Register()));
1448 if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1449 SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
1450 NeedsCommute = (OpNum == 1);
1451 else
1452 return nullptr;
1453 }
1454 }
1455
1456 if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
1457 const MCInstrDesc &MemDesc = get(MemOpcode);
1458 uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1459 assert(AccessBytes != 0 && "Size of access should be known");
1460 assert(AccessBytes <= Size && "Access outside the frame index");
1461 uint64_t Offset = Size - AccessBytes;
1462 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1463 MI.getDebugLoc(), get(MemOpcode));
1464 if (MI.isCompare()) {
1465 assert(NumOps == 2 && "Expected 2 register operands for a compare.");
1466 MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
1467 }
1468 else if (FusedFPOp) {
1469 MIB.add(MI.getOperand(0));
1470 MIB.add(MI.getOperand(3));
1471 MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
1472 }
1473 else {
1474 MIB.add(MI.getOperand(0));
1475 if (NeedsCommute)
1476 MIB.add(MI.getOperand(2));
1477 else
1478 for (unsigned I = 1; I < OpNum; ++I)
1479 MIB.add(MI.getOperand(I));
1480 }
1481 MIB.addFrameIndex(FrameIndex).addImm(Offset);
1482 if (MemDesc.TSFlags & SystemZII::HasIndex)
1483 MIB.addReg(0);
1484 if (CCOperands) {
1485 unsigned CCValid = MI.getOperand(NumOps).getImm();
1486 unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
1487 MIB.addImm(CCValid);
1488 MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
1489 }
1490 if (MIB->definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1491 (!MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
1492 MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))) {
1493 MIB->addRegisterDead(SystemZ::CC, TRI);
1494 if (CCLiveRange)
1495 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1496 }
1497 // Constrain the register classes if converted from a vector opcode. The
1498 // allocated regs are in an FP reg-class per previous check above.
1499 for (const MachineOperand &MO : MIB->operands())
1500 if (MO.isReg() && MO.getReg().isVirtual()) {
1501 Register Reg = MO.getReg();
1502 if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
1503 MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
1504 else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
1505 MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
1506 else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
1507 MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
1508 }
1509
1510 transferDeadCC(&MI, MIB);
1513 return MIB;
1514 }
1515
1516 return nullptr;
1517}
1518
1521 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1522 LiveIntervals *LIS) const {
1524 MachineBasicBlock *MBB = MI.getParent();
1525
1526 // For reassociable FP operations, any loads have been purposefully left
1527 // unfolded so that MachineCombiner can do its work on reg/reg
1528 // opcodes. After that, as many loads as possible are now folded.
1529 // TODO: This may be beneficial with other opcodes as well as machine-sink
1530 // can move loads close to their user in a different MBB, which the isel
1531 // matcher did not see.
1532 unsigned LoadOpc = 0;
1533 unsigned RegMemOpcode = 0;
1534 const TargetRegisterClass *FPRC = nullptr;
1535 RegMemOpcode = MI.getOpcode() == SystemZ::WFADB ? SystemZ::ADB
1536 : MI.getOpcode() == SystemZ::WFSDB ? SystemZ::SDB
1537 : MI.getOpcode() == SystemZ::WFMDB ? SystemZ::MDB
1538 : 0;
1539 if (RegMemOpcode) {
1540 LoadOpc = SystemZ::VL64;
1541 FPRC = &SystemZ::FP64BitRegClass;
1542 } else {
1543 RegMemOpcode = MI.getOpcode() == SystemZ::WFASB ? SystemZ::AEB
1544 : MI.getOpcode() == SystemZ::WFSSB ? SystemZ::SEB
1545 : MI.getOpcode() == SystemZ::WFMSB ? SystemZ::MEEB
1546 : 0;
1547 if (RegMemOpcode) {
1548 LoadOpc = SystemZ::VL32;
1549 FPRC = &SystemZ::FP32BitRegClass;
1550 }
1551 }
1552 if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc)
1553 return nullptr;
1554
1555 // If RegMemOpcode clobbers CC, first make sure CC is not live at this point.
1556 if (get(RegMemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)) {
1557 assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold.");
1558 assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB.");
1559 for (MachineBasicBlock::iterator MII = std::prev(InsertPt);;
1560 --MII) {
1561 if (MII->definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
1562 if (!MII->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))
1563 return nullptr;
1564 break;
1565 }
1566 if (MII == MBB->begin()) {
1567 if (MBB->isLiveIn(SystemZ::CC))
1568 return nullptr;
1569 break;
1570 }
1571 }
1572 }
1573
1574 Register FoldAsLoadDefReg = LoadMI.getOperand(0).getReg();
1575 if (Ops.size() != 1 || FoldAsLoadDefReg != MI.getOperand(Ops[0]).getReg())
1576 return nullptr;
1577 Register DstReg = MI.getOperand(0).getReg();
1578 MachineOperand LHS = MI.getOperand(1);
1579 MachineOperand RHS = MI.getOperand(2);
1580 MachineOperand &RegMO = RHS.getReg() == FoldAsLoadDefReg ? LHS : RHS;
1581 if ((RegMemOpcode == SystemZ::SDB || RegMemOpcode == SystemZ::SEB) &&
1582 FoldAsLoadDefReg != RHS.getReg())
1583 return nullptr;
1584
1585 MachineOperand &Base = LoadMI.getOperand(1);
1586 MachineOperand &Disp = LoadMI.getOperand(2);
1587 MachineOperand &Indx = LoadMI.getOperand(3);
1589 BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(RegMemOpcode), DstReg)
1590 .add(RegMO)
1591 .add(Base)
1592 .add(Disp)
1593 .add(Indx);
1594 MIB->addRegisterDead(SystemZ::CC, &RI);
1595 MRI->setRegClass(DstReg, FPRC);
1596 MRI->setRegClass(RegMO.getReg(), FPRC);
1598
1599 return MIB;
1600}
1601
1603 switch (MI.getOpcode()) {
1604 case SystemZ::L128:
1605 splitMove(MI, SystemZ::LG);
1606 return true;
1607
1608 case SystemZ::ST128:
1609 splitMove(MI, SystemZ::STG);
1610 return true;
1611
1612 case SystemZ::LX:
1613 splitMove(MI, SystemZ::LD);
1614 return true;
1615
1616 case SystemZ::STX:
1617 splitMove(MI, SystemZ::STD);
1618 return true;
1619
1620 case SystemZ::LBMux:
1621 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1622 return true;
1623
1624 case SystemZ::LHMux:
1625 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1626 return true;
1627
1628 case SystemZ::LLCRMux:
1629 expandZExtPseudo(MI, SystemZ::LLCR, 8);
1630 return true;
1631
1632 case SystemZ::LLHRMux:
1633 expandZExtPseudo(MI, SystemZ::LLHR, 16);
1634 return true;
1635
1636 case SystemZ::LLCMux:
1637 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1638 return true;
1639
1640 case SystemZ::LLHMux:
1641 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1642 return true;
1643
1644 case SystemZ::LMux:
1645 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1646 return true;
1647
1648 case SystemZ::LOCMux:
1649 expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1650 return true;
1651
1652 case SystemZ::LOCHIMux:
1653 expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1654 return true;
1655
1656 case SystemZ::STCMux:
1657 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1658 return true;
1659
1660 case SystemZ::STHMux:
1661 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1662 return true;
1663
1664 case SystemZ::STMux:
1665 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1666 return true;
1667
1668 case SystemZ::STOCMux:
1669 expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1670 return true;
1671
1672 case SystemZ::LHIMux:
1673 expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1674 return true;
1675
1676 case SystemZ::IIFMux:
1677 expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1678 return true;
1679
1680 case SystemZ::IILMux:
1681 expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1682 return true;
1683
1684 case SystemZ::IIHMux:
1685 expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1686 return true;
1687
1688 case SystemZ::NIFMux:
1689 expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1690 return true;
1691
1692 case SystemZ::NILMux:
1693 expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1694 return true;
1695
1696 case SystemZ::NIHMux:
1697 expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1698 return true;
1699
1700 case SystemZ::OIFMux:
1701 expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1702 return true;
1703
1704 case SystemZ::OILMux:
1705 expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1706 return true;
1707
1708 case SystemZ::OIHMux:
1709 expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1710 return true;
1711
1712 case SystemZ::XIFMux:
1713 expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1714 return true;
1715
1716 case SystemZ::TMLMux:
1717 expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1718 return true;
1719
1720 case SystemZ::TMHMux:
1721 expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1722 return true;
1723
1724 case SystemZ::AHIMux:
1725 expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1726 return true;
1727
1728 case SystemZ::AHIMuxK:
1729 expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1730 return true;
1731
1732 case SystemZ::AFIMux:
1733 expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1734 return true;
1735
1736 case SystemZ::CHIMux:
1737 expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1738 return true;
1739
1740 case SystemZ::CFIMux:
1741 expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1742 return true;
1743
1744 case SystemZ::CLFIMux:
1745 expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1746 return true;
1747
1748 case SystemZ::CMux:
1749 expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1750 return true;
1751
1752 case SystemZ::CLMux:
1753 expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1754 return true;
1755
1756 case SystemZ::RISBMux: {
1757 bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
1758 bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
1759 if (SrcIsHigh == DestIsHigh)
1760 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1761 else {
1762 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1763 MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1764 }
1765 return true;
1766 }
1767
1768 case SystemZ::ADJDYNALLOC:
1769 splitAdjDynAlloc(MI);
1770 return true;
1771
1772 case TargetOpcode::LOAD_STACK_GUARD:
1773 expandLoadStackGuard(&MI);
1774 return true;
1775
1776 default:
1777 return false;
1778 }
1779}
1780
1782 if (MI.isInlineAsm()) {
1783 const MachineFunction *MF = MI.getParent()->getParent();
1784 const char *AsmStr = MI.getOperand(0).getSymbolName();
1785 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1786 }
1787 else if (MI.getOpcode() == SystemZ::PATCHPOINT)
1789 else if (MI.getOpcode() == SystemZ::STACKMAP)
1790 return MI.getOperand(1).getImm();
1791 else if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1792 return 6;
1793
1794 return MI.getDesc().getSize();
1795}
1796
1799 switch (MI.getOpcode()) {
1800 case SystemZ::BR:
1801 case SystemZ::BI:
1802 case SystemZ::J:
1803 case SystemZ::JG:
1805 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1806
1807 case SystemZ::BRC:
1808 case SystemZ::BRCL:
1809 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1810 MI.getOperand(1).getImm(), &MI.getOperand(2));
1811
1812 case SystemZ::BRCT:
1813 case SystemZ::BRCTH:
1815 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1816
1817 case SystemZ::BRCTG:
1819 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1820
1821 case SystemZ::CIJ:
1822 case SystemZ::CRJ:
1824 MI.getOperand(2).getImm(), &MI.getOperand(3));
1825
1826 case SystemZ::CLIJ:
1827 case SystemZ::CLRJ:
1829 MI.getOperand(2).getImm(), &MI.getOperand(3));
1830
1831 case SystemZ::CGIJ:
1832 case SystemZ::CGRJ:
1834 MI.getOperand(2).getImm(), &MI.getOperand(3));
1835
1836 case SystemZ::CLGIJ:
1837 case SystemZ::CLGRJ:
1839 MI.getOperand(2).getImm(), &MI.getOperand(3));
1840
1841 case SystemZ::INLINEASM_BR:
1842 // Don't try to analyze asm goto, so pass nullptr as branch target argument.
1843 return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1844
1845 default:
1846 llvm_unreachable("Unrecognized branch opcode");
1847 }
1848}
1849
1851 unsigned &LoadOpcode,
1852 unsigned &StoreOpcode) const {
1853 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1854 LoadOpcode = SystemZ::L;
1855 StoreOpcode = SystemZ::ST;
1856 } else if (RC == &SystemZ::GRH32BitRegClass) {
1857 LoadOpcode = SystemZ::LFH;
1858 StoreOpcode = SystemZ::STFH;
1859 } else if (RC == &SystemZ::GRX32BitRegClass) {
1860 LoadOpcode = SystemZ::LMux;
1861 StoreOpcode = SystemZ::STMux;
1862 } else if (RC == &SystemZ::GR64BitRegClass ||
1863 RC == &SystemZ::ADDR64BitRegClass) {
1864 LoadOpcode = SystemZ::LG;
1865 StoreOpcode = SystemZ::STG;
1866 } else if (RC == &SystemZ::GR128BitRegClass ||
1867 RC == &SystemZ::ADDR128BitRegClass) {
1868 LoadOpcode = SystemZ::L128;
1869 StoreOpcode = SystemZ::ST128;
1870 } else if (RC == &SystemZ::FP32BitRegClass) {
1871 LoadOpcode = SystemZ::LE;
1872 StoreOpcode = SystemZ::STE;
1873 } else if (RC == &SystemZ::FP64BitRegClass) {
1874 LoadOpcode = SystemZ::LD;
1875 StoreOpcode = SystemZ::STD;
1876 } else if (RC == &SystemZ::FP128BitRegClass) {
1877 LoadOpcode = SystemZ::LX;
1878 StoreOpcode = SystemZ::STX;
1879 } else if (RC == &SystemZ::VR32BitRegClass) {
1880 LoadOpcode = SystemZ::VL32;
1881 StoreOpcode = SystemZ::VST32;
1882 } else if (RC == &SystemZ::VR64BitRegClass) {
1883 LoadOpcode = SystemZ::VL64;
1884 StoreOpcode = SystemZ::VST64;
1885 } else if (RC == &SystemZ::VF128BitRegClass ||
1886 RC == &SystemZ::VR128BitRegClass) {
1887 LoadOpcode = SystemZ::VL;
1888 StoreOpcode = SystemZ::VST;
1889 } else
1890 llvm_unreachable("Unsupported regclass to load or store");
1891}
1892
1894 int64_t Offset,
1895 const MachineInstr *MI) const {
1896 const MCInstrDesc &MCID = get(Opcode);
1897 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1898 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1899 // Get the instruction to use for unsigned 12-bit displacements.
1900 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1901 if (Disp12Opcode >= 0)
1902 return Disp12Opcode;
1903
1904 // All address-related instructions can use unsigned 12-bit
1905 // displacements.
1906 return Opcode;
1907 }
1908 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1909 // Get the instruction to use for signed 20-bit displacements.
1910 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1911 if (Disp20Opcode >= 0)
1912 return Disp20Opcode;
1913
1914 // Check whether Opcode allows signed 20-bit displacements.
1916 return Opcode;
1917
1918 // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1919 if (MI && MI->getOperand(0).isReg()) {
1920 Register Reg = MI->getOperand(0).getReg();
1921 if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1922 switch (Opcode) {
1923 case SystemZ::VL32:
1924 return SystemZ::LEY;
1925 case SystemZ::VST32:
1926 return SystemZ::STEY;
1927 case SystemZ::VL64:
1928 return SystemZ::LDY;
1929 case SystemZ::VST64:
1930 return SystemZ::STDY;
1931 default: break;
1932 }
1933 }
1934 }
1935 }
1936 return 0;
1937}
1938
1940 const MCInstrDesc &MCID = get(Opcode);
1942 return SystemZ::getDisp12Opcode(Opcode) >= 0;
1943 return SystemZ::getDisp20Opcode(Opcode) >= 0;
1944}
1945
// Return the "load and test" variant of Opcode — an instruction performing
// the same move but also setting CC from the value — or 0 if none exists.
unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L: return SystemZ::LT;
  case SystemZ::LY: return SystemZ::LT;
  case SystemZ::LG: return SystemZ::LTG;
  case SystemZ::LGF: return SystemZ::LTGF;
  case SystemZ::LR: return SystemZ::LTR;
  case SystemZ::LGFR: return SystemZ::LTGFR;
  case SystemZ::LGR: return SystemZ::LTGR;
  case SystemZ::LCDFR: return SystemZ::LCDBR;
  case SystemZ::LPDFR: return SystemZ::LPDBR;
  case SystemZ::LNDFR: return SystemZ::LNDBR;
  case SystemZ::LCDFR_32: return SystemZ::LCEBR;
  case SystemZ::LPDFR_32: return SystemZ::LPEBR;
  case SystemZ::LNDFR_32: return SystemZ::LNEBR;
  // On zEC12 we prefer to use RISBGN. But if there is a chance to
  // actually use the condition code, we may turn it back into RISGB.
  // Note that RISBG is not really a "load-and-test" instruction,
  // but sets the same condition code values, so is OK to use here.
  case SystemZ::RISBGN: return SystemZ::RISBG;
  default: return 0;
  }
}
1969
1970bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1971 unsigned &Start, unsigned &End) const {
1972 // Reject trivial all-zero masks.
1973 Mask &= allOnes(BitSize);
1974 if (Mask == 0)
1975 return false;
1976
1977 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1978 // the msb and End specifies the index of the lsb.
1979 unsigned LSB, Length;
1980 if (isShiftedMask_64(Mask, LSB, Length)) {
1981 Start = 63 - (LSB + Length - 1);
1982 End = 63 - LSB;
1983 return true;
1984 }
1985
1986 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1987 // of the low 1s and End specifies the lsb of the high 1s.
1988 if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
1989 assert(LSB > 0 && "Bottom bit must be set");
1990 assert(LSB + Length < BitSize && "Top bit must be set");
1991 Start = 63 - (LSB - 1);
1992 End = 63 - (LSB + Length);
1993 return true;
1994 }
1995
1996 return false;
1997}
1998
1999unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
2001 const MachineInstr *MI) const {
2002 switch (Opcode) {
2003 case SystemZ::CHI:
2004 case SystemZ::CGHI:
2005 if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
2006 return 0;
2007 break;
2008 case SystemZ::CLFI:
2009 case SystemZ::CLGFI:
2010 if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
2011 return 0;
2012 break;
2013 case SystemZ::CL:
2014 case SystemZ::CLG:
2015 if (!STI.hasMiscellaneousExtensions())
2016 return 0;
2017 if (!(MI && MI->getOperand(3).getReg() == 0))
2018 return 0;
2019 break;
2020 }
2021 switch (Type) {
2023 switch (Opcode) {
2024 case SystemZ::CR:
2025 return SystemZ::CRJ;
2026 case SystemZ::CGR:
2027 return SystemZ::CGRJ;
2028 case SystemZ::CHI:
2029 return SystemZ::CIJ;
2030 case SystemZ::CGHI:
2031 return SystemZ::CGIJ;
2032 case SystemZ::CLR:
2033 return SystemZ::CLRJ;
2034 case SystemZ::CLGR:
2035 return SystemZ::CLGRJ;
2036 case SystemZ::CLFI:
2037 return SystemZ::CLIJ;
2038 case SystemZ::CLGFI:
2039 return SystemZ::CLGIJ;
2040 default:
2041 return 0;
2042 }
2044 switch (Opcode) {
2045 case SystemZ::CR:
2046 return SystemZ::CRBReturn;
2047 case SystemZ::CGR:
2048 return SystemZ::CGRBReturn;
2049 case SystemZ::CHI:
2050 return SystemZ::CIBReturn;
2051 case SystemZ::CGHI:
2052 return SystemZ::CGIBReturn;
2053 case SystemZ::CLR:
2054 return SystemZ::CLRBReturn;
2055 case SystemZ::CLGR:
2056 return SystemZ::CLGRBReturn;
2057 case SystemZ::CLFI:
2058 return SystemZ::CLIBReturn;
2059 case SystemZ::CLGFI:
2060 return SystemZ::CLGIBReturn;
2061 default:
2062 return 0;
2063 }
2065 switch (Opcode) {
2066 case SystemZ::CR:
2067 return SystemZ::CRBCall;
2068 case SystemZ::CGR:
2069 return SystemZ::CGRBCall;
2070 case SystemZ::CHI:
2071 return SystemZ::CIBCall;
2072 case SystemZ::CGHI:
2073 return SystemZ::CGIBCall;
2074 case SystemZ::CLR:
2075 return SystemZ::CLRBCall;
2076 case SystemZ::CLGR:
2077 return SystemZ::CLGRBCall;
2078 case SystemZ::CLFI:
2079 return SystemZ::CLIBCall;
2080 case SystemZ::CLGFI:
2081 return SystemZ::CLGIBCall;
2082 default:
2083 return 0;
2084 }
2086 switch (Opcode) {
2087 case SystemZ::CR:
2088 return SystemZ::CRT;
2089 case SystemZ::CGR:
2090 return SystemZ::CGRT;
2091 case SystemZ::CHI:
2092 return SystemZ::CIT;
2093 case SystemZ::CGHI:
2094 return SystemZ::CGIT;
2095 case SystemZ::CLR:
2096 return SystemZ::CLRT;
2097 case SystemZ::CLGR:
2098 return SystemZ::CLGRT;
2099 case SystemZ::CLFI:
2100 return SystemZ::CLFIT;
2101 case SystemZ::CLGFI:
2102 return SystemZ::CLGIT;
2103 case SystemZ::CL:
2104 return SystemZ::CLT;
2105 case SystemZ::CLG:
2106 return SystemZ::CLGT;
2107 default:
2108 return 0;
2109 }
2110 }
2111 return 0;
2112}
2113
2116 assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
2117 MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
2118 "Not a compare reg/reg.");
2119
2121 bool CCLive = true;
2123 for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
2124 if (MI.readsRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2125 unsigned Flags = MI.getDesc().TSFlags;
2126 if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
2127 CCUsers.push_back(&MI);
2128 else
2129 return false;
2130 }
2131 if (MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2132 CCLive = false;
2133 break;
2134 }
2135 }
2136 if (CCLive) {
2138 LiveRegs.addLiveOuts(*MBB);
2139 if (!LiveRegs.available(SystemZ::CC))
2140 return false;
2141 }
2142
2143 // Update all CC users.
2144 for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
2145 unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
2146 unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
2147 0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
2148 MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
2149 unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
2150 CCMaskMO.setImm(NewCCMask);
2151 }
2152
2153 return true;
2154}
2155
2156unsigned SystemZ::reverseCCMask(unsigned CCMask) {
2157 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2160 (CCMask & SystemZ::CCMASK_CMP_UO));
2161}
2162
2164 MachineFunction &MF = *MBB->getParent();
2166 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
2167 return NewMBB;
2168}
2169
2173 NewMBB->splice(NewMBB->begin(), MBB,
2174 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2176 return NewMBB;
2177}
2178
2182 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2184 return NewMBB;
2185}
2186
2187unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
2188 if (!STI.hasLoadAndTrap())
2189 return 0;
2190 switch (Opcode) {
2191 case SystemZ::L:
2192 case SystemZ::LY:
2193 return SystemZ::LAT;
2194 case SystemZ::LG:
2195 return SystemZ::LGAT;
2196 case SystemZ::LFH:
2197 return SystemZ::LFHAT;
2198 case SystemZ::LLGF:
2199 return SystemZ::LLGFAT;
2200 case SystemZ::LLGT:
2201 return SystemZ::LLGTAT;
2202 }
2203 return 0;
2204}
2205
2208 unsigned Reg, uint64_t Value) const {
2209 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
2210 unsigned Opcode = 0;
2211 if (isInt<16>(Value))
2212 Opcode = SystemZ::LGHI;
2213 else if (SystemZ::isImmLL(Value))
2214 Opcode = SystemZ::LLILL;
2215 else if (SystemZ::isImmLH(Value)) {
2216 Opcode = SystemZ::LLILH;
2217 Value >>= 16;
2218 }
2219 else if (isInt<32>(Value))
2220 Opcode = SystemZ::LGFI;
2221 if (Opcode) {
2222 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
2223 return;
2224 }
2225
2227 assert (MRI.isSSA() && "Huge values only handled before reg-alloc .");
2228 Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2229 Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2230 BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
2231 BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
2232 .addReg(Reg0).addImm(Value >> 32);
2233 BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
2234 .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
2235}
2236
2238 StringRef &ErrInfo) const {
2239 const MCInstrDesc &MCID = MI.getDesc();
2240 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
2241 if (I >= MCID.getNumOperands())
2242 break;
2243 const MachineOperand &Op = MI.getOperand(I);
2244 const MCOperandInfo &MCOI = MCID.operands()[I];
2245 // Addressing modes have register and immediate operands. Op should be a
2246 // register (or frame index) operand if MCOI.RegClass contains a valid
2247 // register class, or an immediate otherwise.
2248 if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
2249 ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
2250 (MCOI.RegClass == -1 && !Op.isImm()))) {
2251 ErrInfo = "Addressing mode operands corrupt!";
2252 return false;
2253 }
2254 }
2255
2256 return true;
2257}
2258
2261 const MachineInstr &MIb) const {
2262
2263 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2264 return false;
2265
2266 // If mem-operands show that the same address Value is used by both
2267 // instructions, check for non-overlapping offsets and widths. Not
2268 // sure if a register based analysis would be an improvement...
2269
2270 MachineMemOperand *MMOa = *MIa.memoperands_begin();
2271 MachineMemOperand *MMOb = *MIb.memoperands_begin();
2272 const Value *VALa = MMOa->getValue();
2273 const Value *VALb = MMOb->getValue();
2274 bool SameVal = (VALa && VALb && (VALa == VALb));
2275 if (!SameVal) {
2276 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2277 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2278 if (PSVa && PSVb && (PSVa == PSVb))
2279 SameVal = true;
2280 }
2281 if (SameVal) {
2282 int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2283 LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2284 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2285 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2286 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2287 if (LowWidth.hasValue() &&
2288 LowOffset + (int)LowWidth.getValue() <= HighOffset)
2289 return true;
2290 }
2291
2292 return false;
2293}
2294
2296 const Register Reg,
2297 int64_t &ImmVal) const {
2298
2299 if (MI.getOpcode() == SystemZ::VGBM && Reg == MI.getOperand(0).getReg()) {
2300 ImmVal = MI.getOperand(1).getImm();
2301 // TODO: Handle non-0 values
2302 return ImmVal == 0;
2303 }
2304
2305 return false;
2306}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
IRTranslator LLVM IR MI
A set of register units.
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag)
static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI)
static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI, MachineInstr::MIFlag Flag)
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex, unsigned Flag)
static LogicOp interpretAndImmediate(unsigned Opcode)
static uint64_t allOnes(unsigned int Count)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
SlotIndexes * getSlotIndexes() const
VNInfo::Allocator & getVNInfoAllocator()
LiveRange & getRegUnit(unsigned Unit)
Return the live range for register unit Unit.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:30
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
Definition: LiveRegUnits.h:116
void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
bool hasValue() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:91
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
static MCRegister from(unsigned Val)
Check the provided unsigned value is a valid MCRegister.
Definition: MCRegister.h:74
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:396
bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool registerDefIsDead(Register Reg, const TargetRegisterInfo *TRI) const
Returns true if the register is dead in this machine instruction.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:815
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:685
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:800
void setFlag(MIFlag Flag)
Set a MI flag.
Definition: MachineInstr.h:403
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
int64_t getImm() const
void setIsDead(bool Val=true)
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MI-level patchpoint operands.
Definition: StackMaps.h:76
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
Definition: StackMaps.h:104
Special value supplied for machine level alias analysis.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:237
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:379
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
A SystemZ-specific class detailing special use registers particular for calling conventions.
unsigned getLoadAndTrap(unsigned Opcode) const
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
unsigned getLoadAndTest(unsigned Opcode) const
bool isPredicable(const MachineInstr &MI) const override
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset, const MachineInstr *MI=nullptr) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI) const override
SystemZInstrInfo(SystemZSubtarget &STI)
bool hasDisplacementPairInsn(unsigned Opcode) const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned CommuteOpIdx1, unsigned CommuteOpIdx2) const override
Commutes the operands in the given instruction by changing the operands order and/or changing the ins...
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const override
SystemZII::Branch getBranchInfo(const MachineInstr &MI) const
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
unsigned getFusedCompare(unsigned Opcode, SystemZII::FusedCompareType Type, const MachineInstr *MI=nullptr) const
bool expandPostRAPseudo(MachineInstr &MBBI) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
void getLoadStoreOpcodes(const TargetRegisterClass *RC, unsigned &LoadOpcode, unsigned &StoreOpcode) const
bool isRxSBGMask(uint64_t Mask, unsigned BitSize, unsigned &Start, unsigned &End) const
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
bool prepareCompareSwapOperands(MachineBasicBlock::iterator MBBI) const
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
void loadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned Reg, uint64_t Value) const
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
SystemZCallingConventionRegisters * getSpecialRegisters() const
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Target - Wrapper for Target specific information.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:105
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_MEMORY
Definition: MCInstrDesc.h:62
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
static unsigned getAccessSize(unsigned int Flags)
unsigned getFirstReg(unsigned Reg)
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
int getTargetMemOpcode(uint16_t Opcode)
const unsigned CCMASK_CMP_GT
Definition: SystemZ.h:37
const unsigned CCMASK_ANY
Definition: SystemZ.h:31
static bool isImmLL(uint64_t Val)
Definition: SystemZ.h:161
static bool isImmLH(uint64_t Val)
Definition: SystemZ.h:166
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
unsigned reverseCCMask(unsigned CCMask)
const unsigned IPM_CC
Definition: SystemZ.h:112
const unsigned CCMASK_CMP_EQ
Definition: SystemZ.h:35
const unsigned CCMASK_ICMP
Definition: SystemZ.h:47
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_CMP_LT
Definition: SystemZ.h:36
const unsigned CCMASK_CMP_NE
Definition: SystemZ.h:38
bool isHighReg(unsigned int Reg)
const unsigned CCMASK_CMP_UO
Definition: SystemZ.h:43
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
@ Length
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:285
constexpr size_t range_size(R &&Range)
Returns the size of the Range, i.e., the number of elements.
Definition: STLExtras.h:1705
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
unsigned getUndefRegState(bool B)
@ And
Bitwise or logical AND of integers.
unsigned getKillRegState(bool B)