//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZ.h"
#include "SystemZInstrBuilder.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCInstrDesc.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

#define DEBUG_TYPE "systemz-II"

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}
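
// For illustration, a few sample values of this mask helper:
//   allOnes(0)  == 0x0000000000000000
//   allOnes(1)  == 0x0000000000000001
//   allOnes(16) == 0x000000000000FFFF
//   allOnes(64) == 0xFFFFFFFFFFFFFFFF
// The double shift ((1 << (Count - 1)) << 1) avoids the undefined behaviour
// a single 64-bit shift by 64 would have when Count == 64.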

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}

SystemZInstrInfo::SystemZInstrInfo(const SystemZSubtarget &sti)
    : SystemZGenInstrInfo(-1, -1),
      RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
      STI(sti) {}

// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions. Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  Register Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  if (MI->mayStore()) {
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
  }

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Clear the kill flags on the registers in the first instruction.
  if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
    EarlierMI->getOperand(0).setIsKill(false);
  EarlierMI->getOperand(1).setIsKill(false);
  EarlierMI->getOperand(3).setIsKill(false);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}
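
// For illustration (register names are just an example of a GR128 pair and
// its GR64 halves): a store such as
//   ST128 %r0q, %r15, 160, $noreg
// is lowered, assuming both displacements stay in range for STG, into
//   STG %r0, %r15, 160, $noreg   <- EarlierMI (subreg_h64)
//   STG %r1, %r15, 168, $noreg   <- original MI (subreg_l64)
// plus an implicit use of %r0q on both stores so that liveness of the wide
// register is preserved.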

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);
  auto *Regs = STI.getSpecialRegisters();

  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     Regs->getCallFrameSize() +
                     Regs->getStackPointerBias() +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// MI is an RI-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  Register Reg = MI.getOperand(0).getReg();
  bool IsHigh = SystemZ::isHighReg(Reg);
  MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
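
// As a concrete sketch of this expansion: IIFMux becomes IILF when operand 0
// is a low GR32 and IIHF when it is a high GR32. LHIMux is expanded with
// ConvertHigh set, so when the destination is high the signed 16-bit
// immediate is first reinterpreted as an unsigned 32-bit value to match the
// IIHF form.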

// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  Register DestReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcodeK));
  else {
    if (DestReg != SrcReg) {
      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
                    SystemZ::LR, 32, MI.getOperand(1).isKill(),
                    MI.getOperand(1).isUndef());
      MI.getOperand(1).setReg(DestReg);
    }
    MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI.tieOperands(0, 1);
  }
}

// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  Register Reg = MI.getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(
      SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
      MI.getOperand(2).getImm());
  MI.setDesc(get(Opcode));
}

// MI is a load-on-condition pseudo instruction with a single register
// (source or destination) operand. Replace it with LowOpcode if the
// register is a low GR32 and HighOpcode if the register is a high GR32.
void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  Register Reg = MI.getOperand(0).getReg();
  unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
  MI.setDesc(get(Opcode));
}

// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  MachineInstrBuilder MIB =
      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                    LowOpcode, Size, MI.getOperand(1).isKill(),
                    MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is.
  for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
    MIB.add(MO);

  MI.eraseFromParent();
}

void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const Register Reg64 = MI->getOperand(0).getReg();
  const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A0)
      .addReg(Reg64, RegState::ImplicitDefine);

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
      .addReg(Reg64)
      .addReg(0)
      .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}

// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
MachineInstrBuilder
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  unsigned Opcode;
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
  }
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
      .addReg(DestReg, RegState::Undef)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
      .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
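
// For illustration of the RISB path: with a high destination, a low source
// and Size == 32, the builder above produces
//   RISBHL Dest, Src, 0, 128 + 31, 32
// i.e. start bit 32 - Size == 0, end bit 31 with the "zero remaining bits"
// flag (the added 128), and a rotate of 32 to move the low source word into
// the high destination word. The zero-extending cases simply narrow the
// inserted field by using Size == 8 or 16.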

MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::SELRMux:
  case SystemZ::SELFHR:
  case SystemZ::SELR:
  case SystemZ::SELGR:
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
      MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  return 0;
}

Register SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(*I));
    if (!Branch.hasMBBTarget())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.getMBBTarget();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      MBB.erase(std::next(I), MBB.end());

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.getMBBTarget();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.getMBBTarget();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.getMBBTarget())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(*I).hasMBBTarget())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}
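
// Concretely (mask values as defined in SystemZ.h): for an integer compare,
// CCValid is CCMASK_ICMP == 0xE, so reversing CCMask == CCMASK_CMP_EQ (0x8)
// gives 0x8 ^ 0xE == 0x6 == CCMASK_CMP_NE, the complementary condition
// within the valid mask.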

unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
      .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                      Register &SrcReg2, int64_t &Mask,
                                      int64_t &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Pred,
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}

void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, Register DstReg,
                                    ArrayRef<MachineOperand> Pred,
                                    Register TrueReg,
                                    Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELRMux;
    else if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELGR;
    else
      Opc = SystemZ::LOCGR;
  } else
    llvm_unreachable("Invalid register class");

  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(FalseReg).addReg(TrueReg)
      .addImm(CCValid).addImm(CCMask);
}

MachineInstr *SystemZInstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                                  const MachineRegisterInfo *MRI,
                                                  Register &FoldAsLoadDefReg,
                                                  MachineInstr *&DefMI) const {
  // Check whether we can move the DefMI load, and that it only has one use.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(nullptr, SawStore) ||
      !MRI->hasOneNonDBGUse(FoldAsLoadDefReg))
    return nullptr;

  int UseOpIdx =
      MI.findRegisterUseOperandIdx(FoldAsLoadDefReg, /*TRI=*/nullptr);
  assert(UseOpIdx != -1 && "Expected FoldAsLoadDefReg to be used by MI.");

  // Check whether we can fold the load.
  if (MachineInstr *FoldMI =
          foldMemoryOperand(MI, {((unsigned)UseOpIdx)}, *DefMI)) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}

bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     Register Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();

  if (DefOpc == SystemZ::VGBM) {
    int64_t ImmVal = DefMI.getOperand(1).getImm();
    if (ImmVal != 0) // TODO: Handle other values
      return false;

    // Fold gr128 = COPY (vr128 VGBM imm)
    //
    // %tmp:gr64 = LGHI 0
    // to gr128 = REG_SEQUENCE %tmp, %tmp
    assert(DefMI.getOperand(0).getReg() == Reg);

    if (!UseMI.isCopy())
      return false;

    Register CopyDstReg = UseMI.getOperand(0).getReg();
    if (CopyDstReg.isVirtual() &&
        MRI->getRegClass(CopyDstReg) == &SystemZ::GR128BitRegClass &&
        MRI->hasOneNonDBGUse(Reg)) {
      // TODO: Handle physical registers
      // TODO: Handle gr64 uses with subregister indexes
      // TODO: Should this handle multi-use cases?
      Register TmpReg = MRI->createVirtualRegister(&SystemZ::GR64BitRegClass);
      MachineBasicBlock &MBB = *UseMI.getParent();

      loadImmediate(MBB, UseMI.getIterator(), TmpReg, ImmVal);

      UseMI.setDesc(get(SystemZ::REG_SEQUENCE));
      UseMI.getOperand(1).setReg(TmpReg);
      MachineInstrBuilder(*MBB.getParent(), &UseMI)
          .addImm(SystemZ::subreg_h64)
          .addReg(TmpReg)
          .addImm(SystemZ::subreg_l64);

      if (MRI->use_nodbg_empty(Reg))
        DefMI.eraseFromParent();
      return true;
    }

    return false;
  }

  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  bool TieOps = false;
  switch (UseOpc) {
  case SystemZ::SELRMux:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::SELGR:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (TieOps)
    UseMI.tieOperands(0, 1);
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
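
// A sketch of the LOCRMux case above (virtual register names are made up):
//   %imm:grx32 = LHIMux 42
//   %dst:grx32 = LOCRMux %false, %imm, CCValid, CCMask
// becomes, when %imm has a single non-debug use,
//   %dst:grx32 = LOCHIMux %false, 42, CCValid, CCMask
// and the LHIMux is erased. If the constant feeds operand 1 instead, the
// operands are commuted first, which also inverts CCMask against CCValid.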

bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Return_XPLINK ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer). This doesn't apply for low-probability
  // loops (eg. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                          BranchProbability Probability) const {
  // For now only duplicate single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
    MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
                                             : SystemZ::CondReturn_XPLINK));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid)
        .addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid)
        .addImm(CCMask)
        .add(FirstOp)
        .addRegMask(RegMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    MachineOperand Target = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .add(Target)
        .addRegMask(RegMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}

void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
        .addReg(SrcReg, RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
        .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    MCRegister SrcRegHi =
        RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister SrcRegLo =
        RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
        .addReg(SrcRegHi, getKillRegState(KillSrc))
        .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    MCRegister DestRegHi =
        RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister DestRegLo =
        RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
        .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::GR128BitRegClass.contains(SrcReg)) {
    MCRegister DestRegHi = RI.getSubReg(DestReg, SystemZ::subreg_h64);
    MCRegister DestRegLo = RI.getSubReg(DestReg, SystemZ::subreg_l64);
    MCRegister SrcRegHi = RI.getSubReg(SrcReg, SystemZ::subreg_h64);
    MCRegister SrcRegLo = RI.getSubReg(SrcReg, SystemZ::subreg_l64);

    BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegHi)
        .addReg(SrcRegHi)
        .addDef(DestReg, RegState::Implicit);

    BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegLo)
        .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }

  // Move CC value from a GR32.
  if (DestReg == SystemZ::CC) {
    unsigned Opcode =
        SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
    BuildMI(MBB, MBBI, DL, get(Opcode))
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  if (SystemZ::GR128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    MCRegister DestH64 = RI.getSubReg(DestReg, SystemZ::subreg_h64);
    MCRegister DestL64 = RI.getSubReg(DestReg, SystemZ::subreg_l64);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestH64)
        .addReg(SrcReg)
        .addReg(SystemZ::NoRegister)
        .addImm(0)
        .addDef(DestReg, RegState::Implicit);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestL64)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SystemZ::NoRegister)
        .addImm(1);
    return;
  }

  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::GR128BitRegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLVGP), DestReg)
        .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64))
        .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
}

void SystemZInstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI, Register VReg) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                        .addReg(SrcReg, getKillRegState(isKill)),
                    FrameIdx);
}

void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            Register DestReg, int FrameIdx,
                                            const TargetRegisterClass *RC,
                                            const TargetRegisterInfo *TRI,
                                            Register VReg) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {

struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0;
  unsigned ImmLSB = 0;
  unsigned ImmSize = 0;
};

} // end anonymous namespace

static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32, 0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64, 0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32, 0, 32);
  case SystemZ::NILF64: return LogicOp(64, 0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default: return LogicOp();
  }
}

static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
  if (OldMI->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr)) {
    MachineOperand *CCDef =
        NewMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr);
    if (CCDef != nullptr)
      CCDef->setIsDead(true);
  }
}

static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
                           MachineInstr::MIFlag Flag) {
  if (OldMI->getFlag(Flag))
    NewMI->setFlag(Flag);
}

MachineInstr *SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                      LiveVariables *LV,
                                                      LiveIntervals *LIS) const {
  MachineBasicBlock *MBB = MI.getParent();

  // Try to convert an AND into an RISBG-type instruction.
  // TODO: It might be beneficial to select RISBG and shorten to AND instead.
  if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
    uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src = MI.getOperand(1);
      MachineInstrBuilder MIB =
          BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
              .add(Dest)
              .addReg(0)
              .addReg(Src.getReg(), getKillRegState(Src.isKill()),
                      Src.getSubReg())
              .addImm(Start)
              .addImm(End + 128)
              .addImm(0);
      if (LV) {
        unsigned NumOps = MI.getNumOperands();
        for (unsigned I = 1; I < NumOps; ++I) {
          MachineOperand &Op = MI.getOperand(I);
          if (Op.isReg() && Op.isKill())
            LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
        }
      }
      if (LIS)
        LIS->ReplaceMachineInstrInMaps(MI, *MIB);
      transferDeadCC(&MI, MIB);
      return MIB;
    }
  }
  return nullptr;
}
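
// Worked example of the AND-to-RISBG path (with a made-up virtual register):
//   %dst:gr64 = NILL64 %src, 0xFFF0
// only ANDs the low 16 bits, so the effective mask becomes
//   Imm = 0xFFF0 | ~0xFFFF = 0xFFFFFFFFFFFFFFF0,
// a single contiguous run of ones. isRxSBGMask reports Start == 0 and
// End == 59 (big-endian bit numbering), giving RISBG (or RISBGN when the
// miscellaneous-extensions facility is available) with immediates 0,
// 59 + 128 and 0, where the added 128 sets the "zero remaining bits" flag
// and RISBGN additionally leaves CC untouched.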

bool SystemZInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                   bool Invert) const {
  unsigned Opc = Inst.getOpcode();
  if (Invert) {
    auto InverseOpcode = getInverseOpcode(Opc);
    if (!InverseOpcode)
      return false;
    Opc = *InverseOpcode;
  }

  switch (Opc) {
  default:
    break;
  // Adds and multiplications.
  case SystemZ::WFADB:
  case SystemZ::WFASB:
  case SystemZ::WFAXB:
  case SystemZ::VFADB:
  case SystemZ::VFASB:
  case SystemZ::WFMDB:
  case SystemZ::WFMSB:
  case SystemZ::WFMXB:
  case SystemZ::VFMDB:
  case SystemZ::VFMSB:
    return (Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
            Inst.getFlag(MachineInstr::MIFlag::FmNsz));
  }

  return false;
}

std::optional<unsigned>
SystemZInstrInfo::getInverseOpcode(unsigned Opcode) const {
  // fadd => fsub
  switch (Opcode) {
  case SystemZ::WFADB:
    return SystemZ::WFSDB;
  case SystemZ::WFASB:
    return SystemZ::WFSSB;
  case SystemZ::WFAXB:
    return SystemZ::WFSXB;
  case SystemZ::VFADB:
    return SystemZ::VFSDB;
  case SystemZ::VFASB:
    return SystemZ::VFSSB;
  // fsub => fadd
  case SystemZ::WFSDB:
    return SystemZ::WFADB;
  case SystemZ::WFSSB:
    return SystemZ::WFASB;
  case SystemZ::WFSXB:
    return SystemZ::WFAXB;
  case SystemZ::VFSDB:
    return SystemZ::VFADB;
  case SystemZ::VFSSB:
    return SystemZ::VFASB;
  default:
    return std::nullopt;
  }
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS, VirtRegMap *VRM) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Size = MFI.getObjectSize(FrameIndex);
  unsigned Opcode = MI.getOpcode();

  // Check CC liveness if new instruction introduces a dead def of CC.
  SlotIndex MISlot = SlotIndex();
  LiveRange *CCLiveRange = nullptr;
  bool CCLiveAtMI = true;
  if (LIS) {
    MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
    auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
    assert(range_size(CCUnits) == 1 && "CC only has one reg unit.");
    CCLiveRange = &LIS->getRegUnit(*CCUnits.begin());
    CCLiveAtMI = CCLiveRange->liveAt(MISlot);
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
      MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
                                      MI.getDebugLoc(), get(SystemZ::AGSI))
                                  .addFrameIndex(FrameIndex)
                                  .addImm(0)
                                  .addImm(MI.getOperand(2).getImm());
      BuiltMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr)
          ->setIsDead(true);
      CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
      return BuiltMI;
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size * 8 ==
             TRI->getRegSizeInBits(*MF.getRegInfo()
                                        .getRegClass(MI.getOperand(OpNum).getReg())) &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
      isInt<8>(MI.getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    transferMIFlag(&MI, BuiltMI, MachineInstr::NoSWrap);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
       isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::ALGFI && OpNum == 0 &&
       isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
    // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
       isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::SLGFI && OpNum == 0 &&
       isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
    // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
    Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)-MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  unsigned MemImmOpc = 0;
  switch (Opcode) {
  case SystemZ::LHIMux:
  case SystemZ::LHI: MemImmOpc = SystemZ::MVHI; break;
  case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break;
  case SystemZ::CHIMux:
  case SystemZ::CHI: MemImmOpc = SystemZ::CHSI; break;
  case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break;
  case SystemZ::CLFIMux:
  case SystemZ::CLFI:
    if (isUInt<16>(MI.getOperand(1).getImm()))
      MemImmOpc = SystemZ::CLFHSI;
    break;
  case SystemZ::CLGFI:
    if (isUInt<16>(MI.getOperand(1).getImm()))
      MemImmOpc = SystemZ::CLGHSI;
    break;
  default: break;
  }
  if (MemImmOpc)
    return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                   get(MemImmOpc))
        .addFrameIndex(FrameIndex)
        .addImm(0)
        .addImm(MI.getOperand(1).getImm());

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(StoreOpcode))
          .add(MI.getOperand(1))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(LoadOpcode))
          .add(MI.getOperand(0))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled. Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy. This means that we cannot use it if the
  // load or store is volatile. We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal. We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI.hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI.memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(Size)
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addImm(Size)
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one or the instruction is
  // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of
  // CC if it is live and MI does not define it.
  unsigned NumOps = MI.getNumExplicitOperands();
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode == -1 ||
      (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
       get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
    return nullptr;

  // Check if all other vregs have a usable allocation in the case of vector
  // to FP conversion.
  const MCInstrDesc &MCID = MI.getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
    const MCOperandInfo &MCOI = MCID.operands()[I];
    if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
      continue;
    const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
    if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
      Register Reg = MI.getOperand(I).getReg();
      Register PhysReg = Reg.isVirtual()
                             ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
                             : Reg;
      if (!PhysReg ||
          !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
            SystemZ::FP64BitRegClass.contains(PhysReg) ||
            SystemZ::VF128BitRegClass.contains(PhysReg)))
        return nullptr;
    }
  }
  // Fused multiply and add/sub need to have the same dst and accumulator reg.
  bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
                    Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
  if (FusedFPOp) {
    Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
    Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
    if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
      return nullptr;
  }

  // Try to swap compare operands if possible.
  bool NeedsCommute = false;
  if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
       MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
       MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
       MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
      OpNum == 0 && prepareCompareSwapOperands(MI))
    NeedsCommute = true;

  bool CCOperands = false;
  if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
      MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
    assert(MI.getNumOperands() == 6 && NumOps == 5 &&
           "LOCR/SELR instruction operands corrupt?");
    NumOps -= 2;
    CCOperands = true;
  }

  // See if this is a 3-address instruction that is convertible to 2-address
  // and suitable for folding below. Only try this with virtual registers
  // and a provided VRM (during regalloc).
  if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
    if (VRM == nullptr)
      return nullptr;
    else {
      Register DstReg = MI.getOperand(0).getReg();
      Register DstPhys =
          (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
      Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
                                    : ((OpNum == 1 && MI.isCommutable())
                                           ? MI.getOperand(2).getReg()
                                           : Register()));
      if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
          SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
        NeedsCommute = (OpNum == 1);
      else
        return nullptr;
    }
  }

  if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
    const MCInstrDesc &MemDesc = get(MemOpcode);
    uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
    assert(AccessBytes != 0 && "Size of access should be known");
    assert(AccessBytes <= Size && "Access outside the frame index");
    uint64_t Offset = Size - AccessBytes;
    MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                      MI.getDebugLoc(), get(MemOpcode));
    if (MI.isCompare()) {
      assert(NumOps == 2 && "Expected 2 register operands for a compare.");
      MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
    }
    else if (FusedFPOp) {
      MIB.add(MI.getOperand(0));
      MIB.add(MI.getOperand(3));
      MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
    }
    else {
      MIB.add(MI.getOperand(0));
      if (NeedsCommute)
        MIB.add(MI.getOperand(2));
      else
        for (unsigned I = 1; I < OpNum; ++I)
          MIB.add(MI.getOperand(I));
    }
    MIB.addFrameIndex(FrameIndex).addImm(Offset);
    if (MemDesc.TSFlags & SystemZII::HasIndex)
      MIB.addReg(0);
    if (CCOperands) {
      unsigned CCValid = MI.getOperand(NumOps).getImm();
      unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
      MIB.addImm(CCValid);
      MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
    }
    if (MIB->definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
        (!MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
         MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))) {
      MIB->addRegisterDead(SystemZ::CC, TRI);
      if (CCLiveRange)
        CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
    }
    // Constrain the register classes if converted from a vector opcode. The
    // allocated regs are in an FP reg-class per previous check above.
    for (const MachineOperand &MO : MIB->operands())
      if (MO.isReg() && MO.getReg().isVirtual()) {
        Register Reg = MO.getReg();
        if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
          MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
        else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
          MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
        else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
          MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
      }

    transferDeadCC(&MI, MIB);
    transferMIFlag(&MI, MIB, MachineInstr::NoSWrap);
    transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);
    return MIB;
  }

  return nullptr;
}
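
// As an illustration of the simplest fold above (operands are hypothetical):
// if the register of
//   %r = AHI %r, 5
// is spilled to a frame slot, the addition is performed directly in memory as
//   ASI <fi>, 0, 5
// (AGSI for the 64-bit AGHI form), provided the immediate fits in a signed
// 8-bit field; the dead-CC marking is carried over by transferDeadCC.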

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  MachineBasicBlock *MBB = MI.getParent();

  // For reassociable FP operations, any loads have been purposefully left
  // unfolded so that MachineCombiner can do its work on reg/reg
  // opcodes. After that, as many loads as possible are now folded.
  // TODO: This may be beneficial with other opcodes as well as machine-sink
  // can move loads close to their user in a different MBB, which the isel
  // matcher did not see.
  unsigned LoadOpc = 0;
  unsigned RegMemOpcode = 0;
  const TargetRegisterClass *FPRC = nullptr;
  RegMemOpcode = MI.getOpcode() == SystemZ::WFADB   ? SystemZ::ADB
                 : MI.getOpcode() == SystemZ::WFSDB ? SystemZ::SDB
                 : MI.getOpcode() == SystemZ::WFMDB ? SystemZ::MDB
                                                    : 0;
  if (RegMemOpcode) {
    LoadOpc = SystemZ::VL64;
    FPRC = &SystemZ::FP64BitRegClass;
  } else {
    RegMemOpcode = MI.getOpcode() == SystemZ::WFASB   ? SystemZ::AEB
                   : MI.getOpcode() == SystemZ::WFSSB ? SystemZ::SEB
                   : MI.getOpcode() == SystemZ::WFMSB ? SystemZ::MEEB
                                                      : 0;
    if (RegMemOpcode) {
      LoadOpc = SystemZ::VL32;
      FPRC = &SystemZ::FP32BitRegClass;
    }
  }
  if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc)
    return nullptr;

  // If RegMemOpcode clobbers CC, first make sure CC is not live at this point.
  if (get(RegMemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)) {
    assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold.");
    assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB.");
    for (MachineBasicBlock::iterator MII = std::prev(InsertPt);;
         --MII) {
      if (MII->definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
        if (!MII->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))
          return nullptr;
        break;
      }
      if (MII == MBB->begin()) {
        if (MBB->isLiveIn(SystemZ::CC))
          return nullptr;
        break;
      }
    }
  }

  Register FoldAsLoadDefReg = LoadMI.getOperand(0).getReg();
  if (Ops.size() != 1 || FoldAsLoadDefReg != MI.getOperand(Ops[0]).getReg())
    return nullptr;
  Register DstReg = MI.getOperand(0).getReg();
  MachineOperand LHS = MI.getOperand(1);
  MachineOperand RHS = MI.getOperand(2);
  MachineOperand &RegMO = RHS.getReg() == FoldAsLoadDefReg ? LHS : RHS;
  if ((RegMemOpcode == SystemZ::SDB || RegMemOpcode == SystemZ::SEB) &&
      FoldAsLoadDefReg != RHS.getReg())
    return nullptr;

  MachineOperand &Base = LoadMI.getOperand(1);
  MachineOperand &Disp = LoadMI.getOperand(2);
  MachineOperand &Indx = LoadMI.getOperand(3);
  MachineInstrBuilder MIB =
      BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(RegMemOpcode),
              DstReg)
          .add(RegMO)
          .add(Base)
          .add(Disp)
          .add(Indx);
  MIB->addRegisterDead(SystemZ::CC, &RI);
  MRI->setRegClass(DstReg, FPRC);
  MRI->setRegClass(RegMO.getReg(), FPRC);
  transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);

  return MIB;
}
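
// A sketch of the reg/mem folding above (vreg names are hypothetical):
//   %x:vr64 = VL64 <base>, <disp>, <index>
//   %y:vr64 = WFADB %a, %x
// becomes, when CC is not live across the insertion point,
//   %y = ADB %a, <base>, <disp>, <index>
// with %y and %a re-constrained to FP64 so the reg/mem form is legal. For
// the non-commutable SDB/SEB cases the loaded value must be the second
// (subtrahend) operand.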
1589
1591 switch (MI.getOpcode()) {
1592 case SystemZ::L128:
1593 splitMove(MI, SystemZ::LG);
1594 return true;
1595
1596 case SystemZ::ST128:
1597 splitMove(MI, SystemZ::STG);
1598 return true;
1599
1600 case SystemZ::LX:
1601 splitMove(MI, SystemZ::LD);
1602 return true;
1603
1604 case SystemZ::STX:
1605 splitMove(MI, SystemZ::STD);
1606 return true;
1607
1608 case SystemZ::LBMux:
1609 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1610 return true;
1611
1612 case SystemZ::LHMux:
1613 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1614 return true;
1615
1616 case SystemZ::LLCRMux:
1617 expandZExtPseudo(MI, SystemZ::LLCR, 8);
1618 return true;
1619
1620 case SystemZ::LLHRMux:
1621 expandZExtPseudo(MI, SystemZ::LLHR, 16);
1622 return true;
1623
1624 case SystemZ::LLCMux:
1625 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1626 return true;
1627
1628 case SystemZ::LLHMux:
1629 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1630 return true;
1631
1632 case SystemZ::LMux:
1633 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1634 return true;
1635
1636 case SystemZ::LOCMux:
1637 expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1638 return true;
1639
1640 case SystemZ::LOCHIMux:
1641 expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1642 return true;
1643
1644 case SystemZ::STCMux:
1645 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1646 return true;
1647
1648 case SystemZ::STHMux:
1649 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1650 return true;
1651
1652 case SystemZ::STMux:
1653 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1654 return true;
1655
1656 case SystemZ::STOCMux:
1657 expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1658 return true;
1659
1660 case SystemZ::LHIMux:
1661 expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1662 return true;
1663
1664 case SystemZ::IIFMux:
1665 expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1666 return true;
1667
1668 case SystemZ::IILMux:
1669 expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1670 return true;
1671
1672 case SystemZ::IIHMux:
1673 expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1674 return true;
1675
1676 case SystemZ::NIFMux:
1677 expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1678 return true;
1679
1680 case SystemZ::NILMux:
1681 expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1682 return true;
1683
1684 case SystemZ::NIHMux:
1685 expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1686 return true;
1687
1688 case SystemZ::OIFMux:
1689 expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1690 return true;
1691
1692 case SystemZ::OILMux:
1693 expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1694 return true;
1695
1696 case SystemZ::OIHMux:
1697 expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1698 return true;
1699
1700 case SystemZ::XIFMux:
1701 expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1702 return true;
1703
1704 case SystemZ::TMLMux:
1705 expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1706 return true;
1707
1708 case SystemZ::TMHMux:
1709 expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1710 return true;
1711
1712 case SystemZ::AHIMux:
1713 expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1714 return true;
1715
1716 case SystemZ::AHIMuxK:
1717 expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1718 return true;
1719
1720 case SystemZ::AFIMux:
1721 expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1722 return true;
1723
1724 case SystemZ::CHIMux:
1725 expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1726 return true;
1727
1728 case SystemZ::CFIMux:
1729 expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1730 return true;
1731
1732 case SystemZ::CLFIMux:
1733 expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1734 return true;
1735
1736 case SystemZ::CMux:
1737 expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1738 return true;
1739
1740 case SystemZ::CLMux:
1741 expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1742 return true;
1743
1744 case SystemZ::RISBMux: {
1745 bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
1746 bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
1747 if (SrcIsHigh == DestIsHigh)
1748 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1749 else {
1750 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1751 MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1752 }
1753 return true;
1754 }
1755
1756 case SystemZ::ADJDYNALLOC:
1757 splitAdjDynAlloc(MI);
1758 return true;
1759
1760 case TargetOpcode::LOAD_STACK_GUARD:
1761 expandLoadStackGuard(&MI);
1762 return true;
1763
1764 default:
1765 return false;
1766 }
1767}
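// [Editorial sketch - not part of SystemZInstrInfo.cpp] The "Mux" pseudos
// handled above pick a real opcode based on whether the GRX32 register was
// allocated to the low (GR32) or high (GRH32) half of a 64-bit GPR; RISBMux
// additionally XORs the rotate amount with 32 when source and destination
// end up in different halves.  The standalone snippet below (placeholder
// names, plain C++, no LLVM APIs) illustrates that rotate adjustment.
#include <cassert>

// Same halves: keep the rotate amount.  Different halves: rotate by an extra
// 32 bits so the selected bits cross between the two 32-bit register halves.
static unsigned adjustRotateForHalves(bool DestIsHigh, bool SrcIsHigh,
                                      unsigned Rotate) {
  return (DestIsHigh == SrcIsHigh) ? Rotate : (Rotate ^ 32);
}

int main() {
  assert(adjustRotateForHalves(false, false, 0) == 0);  // RISBLL-like case
  assert(adjustRotateForHalves(true, false, 0) == 32);  // RISBHL-like case
  return 0;
}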
1768
1769unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1770 if (MI.isInlineAsm()) {
1771 const MachineFunction *MF = MI.getParent()->getParent();
1772 const char *AsmStr = MI.getOperand(0).getSymbolName();
1773 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1774 }
1775 else if (MI.getOpcode() == SystemZ::PATCHPOINT)
1776 return PatchPointOpers(&MI).getNumPatchBytes();
1777 else if (MI.getOpcode() == SystemZ::STACKMAP)
1778 return MI.getOperand(1).getImm();
1779 else if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1780 return 6;
1781
1782 return MI.getDesc().getSize();
1783}
1784
1785SystemZII::Branch
1786SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1787 switch (MI.getOpcode()) {
1788 case SystemZ::BR:
1789 case SystemZ::BI:
1790 case SystemZ::J:
1791 case SystemZ::JG:
1792 return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1793 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1794
1795 case SystemZ::BRC:
1796 case SystemZ::BRCL:
1797 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1798 MI.getOperand(1).getImm(), &MI.getOperand(2));
1799
1800 case SystemZ::BRCT:
1801 case SystemZ::BRCTH:
1802 return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1803 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1804
1805 case SystemZ::BRCTG:
1806 return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1807 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1808
1809 case SystemZ::CIJ:
1810 case SystemZ::CRJ:
1811 return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1812 MI.getOperand(2).getImm(), &MI.getOperand(3));
1813
1814 case SystemZ::CLIJ:
1815 case SystemZ::CLRJ:
1816 return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1817 MI.getOperand(2).getImm(), &MI.getOperand(3));
1818
1819 case SystemZ::CGIJ:
1820 case SystemZ::CGRJ:
1821 return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1822 MI.getOperand(2).getImm(), &MI.getOperand(3));
1823
1824 case SystemZ::CLGIJ:
1825 case SystemZ::CLGRJ:
1826 return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1827 MI.getOperand(2).getImm(), &MI.getOperand(3));
1828
1829 case SystemZ::INLINEASM_BR:
1830 // Don't try to analyze asm goto, so pass nullptr as branch target argument.
1831 return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1832
1833 default:
1834 llvm_unreachable("Unrecognized branch opcode");
1835 }
1836}
1837
1838void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1839 unsigned &LoadOpcode,
1840 unsigned &StoreOpcode) const {
1841 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1842 LoadOpcode = SystemZ::L;
1843 StoreOpcode = SystemZ::ST;
1844 } else if (RC == &SystemZ::GRH32BitRegClass) {
1845 LoadOpcode = SystemZ::LFH;
1846 StoreOpcode = SystemZ::STFH;
1847 } else if (RC == &SystemZ::GRX32BitRegClass) {
1848 LoadOpcode = SystemZ::LMux;
1849 StoreOpcode = SystemZ::STMux;
1850 } else if (RC == &SystemZ::GR64BitRegClass ||
1851 RC == &SystemZ::ADDR64BitRegClass) {
1852 LoadOpcode = SystemZ::LG;
1853 StoreOpcode = SystemZ::STG;
1854 } else if (RC == &SystemZ::GR128BitRegClass ||
1855 RC == &SystemZ::ADDR128BitRegClass) {
1856 LoadOpcode = SystemZ::L128;
1857 StoreOpcode = SystemZ::ST128;
1858 } else if (RC == &SystemZ::FP32BitRegClass) {
1859 LoadOpcode = SystemZ::LE;
1860 StoreOpcode = SystemZ::STE;
1861 } else if (RC == &SystemZ::FP64BitRegClass) {
1862 LoadOpcode = SystemZ::LD;
1863 StoreOpcode = SystemZ::STD;
1864 } else if (RC == &SystemZ::FP128BitRegClass) {
1865 LoadOpcode = SystemZ::LX;
1866 StoreOpcode = SystemZ::STX;
1867 } else if (RC == &SystemZ::VR32BitRegClass) {
1868 LoadOpcode = SystemZ::VL32;
1869 StoreOpcode = SystemZ::VST32;
1870 } else if (RC == &SystemZ::VR64BitRegClass) {
1871 LoadOpcode = SystemZ::VL64;
1872 StoreOpcode = SystemZ::VST64;
1873 } else if (RC == &SystemZ::VF128BitRegClass ||
1874 RC == &SystemZ::VR128BitRegClass) {
1875 LoadOpcode = SystemZ::VL;
1876 StoreOpcode = SystemZ::VST;
1877 } else
1878 llvm_unreachable("Unsupported regclass to load or store");
1879}
1880
1881unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1882 int64_t Offset,
1883 const MachineInstr *MI) const {
1884 const MCInstrDesc &MCID = get(Opcode);
1885 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1886 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1887 // Get the instruction to use for unsigned 12-bit displacements.
1888 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1889 if (Disp12Opcode >= 0)
1890 return Disp12Opcode;
1891
1892 // All address-related instructions can use unsigned 12-bit
1893 // displacements.
1894 return Opcode;
1895 }
1896 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1897 // Get the instruction to use for signed 20-bit displacements.
1898 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1899 if (Disp20Opcode >= 0)
1900 return Disp20Opcode;
1901
1902 // Check whether Opcode allows signed 20-bit displacements.
1903 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1904 return Opcode;
1905
1906 // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1907 if (MI && MI->getOperand(0).isReg()) {
1908 Register Reg = MI->getOperand(0).getReg();
1909 if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1910 switch (Opcode) {
1911 case SystemZ::VL32:
1912 return SystemZ::LEY;
1913 case SystemZ::VST32:
1914 return SystemZ::STEY;
1915 case SystemZ::VL64:
1916 return SystemZ::LDY;
1917 case SystemZ::VST64:
1918 return SystemZ::STDY;
1919 default: break;
1920 }
1921 }
1922 }
1923 }
1924 return 0;
1925}
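// [Editorial sketch - not part of SystemZInstrInfo.cpp] getOpcodeForOffset
// chooses between the short and the long ("Y") displacement form: the short
// form encodes an unsigned 12-bit displacement (0..4095), the long form a
// signed 20-bit displacement (-524288..524287), and for a 128-bit access the
// offset of the second 8-byte half must fit as well.  A plain-C++ check of
// those ranges (helper names are placeholders):
#include <cassert>
#include <cstdint>

static bool fitsUnsigned12(int64_t Off) { return Off >= 0 && Off < (1 << 12); }
static bool fitsSigned20(int64_t Off) {
  return Off >= -(1 << 19) && Off < (1 << 19);
}

int main() {
  assert(fitsUnsigned12(4095) && !fitsUnsigned12(4096));
  assert(fitsSigned20(-524288) && !fitsSigned20(524288));
  // A 128-bit access at offset 4090 needs the long form: its second half
  // sits at offset 4098, which no longer fits in 12 bits.
  assert(fitsUnsigned12(4090) && !fitsUnsigned12(4090 + 8));
  return 0;
}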
1926
1927bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const {
1928 const MCInstrDesc &MCID = get(Opcode);
1929 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1930 return SystemZ::getDisp12Opcode(Opcode) >= 0;
1931 return SystemZ::getDisp20Opcode(Opcode) >= 0;
1932}
1933
1934unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1935 switch (Opcode) {
1936 case SystemZ::L: return SystemZ::LT;
1937 case SystemZ::LY: return SystemZ::LT;
1938 case SystemZ::LG: return SystemZ::LTG;
1939 case SystemZ::LGF: return SystemZ::LTGF;
1940 case SystemZ::LR: return SystemZ::LTR;
1941 case SystemZ::LGFR: return SystemZ::LTGFR;
1942 case SystemZ::LGR: return SystemZ::LTGR;
1943 case SystemZ::LCDFR: return SystemZ::LCDBR;
1944 case SystemZ::LPDFR: return SystemZ::LPDBR;
1945 case SystemZ::LNDFR: return SystemZ::LNDBR;
1946 case SystemZ::LCDFR_32: return SystemZ::LCEBR;
1947 case SystemZ::LPDFR_32: return SystemZ::LPEBR;
1948 case SystemZ::LNDFR_32: return SystemZ::LNEBR;
1949 // On zEC12 we prefer to use RISBGN. But if there is a chance to
1950 // actually use the condition code, we may turn it back into RISBG.
1951 // Note that RISBG is not really a "load-and-test" instruction,
1952 // but sets the same condition code values, so is OK to use here.
1953 case SystemZ::RISBGN: return SystemZ::RISBG;
1954 default: return 0;
1955 }
1956}
1957
1958bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1959 unsigned &Start, unsigned &End) const {
1960 // Reject trivial all-zero masks.
1961 Mask &= allOnes(BitSize);
1962 if (Mask == 0)
1963 return false;
1964
1965 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1966 // the msb and End specifies the index of the lsb.
1967 unsigned LSB, Length;
1968 if (isShiftedMask_64(Mask, LSB, Length)) {
1969 Start = 63 - (LSB + Length - 1);
1970 End = 63 - LSB;
1971 return true;
1972 }
1973
1974 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1975 // of the low 1s and End specifies the lsb of the high 1s.
1976 if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
1977 assert(LSB > 0 && "Bottom bit must be set");
1978 assert(LSB + Length < BitSize && "Top bit must be set");
1979 Start = 63 - (LSB - 1);
1980 End = 63 - (LSB + Length);
1981 return true;
1982 }
1983
1984 return false;
1985}
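// [Editorial sketch - not part of SystemZInstrInfo.cpp] For the contiguous
// (non-wrapping) case above, SystemZ numbers bits with 0 as the most
// significant bit of the 64-bit register, so a run of ones whose lowest set
// bit is LSB and whose length is Length maps to
// Start = 63 - (LSB + Length - 1) and End = 63 - LSB.  A standalone check
// in plain C++ (wrap-around masks are not handled in this sketch):
#include <cassert>
#include <cstdint>

static void maskToStartEnd(uint64_t Mask, unsigned &Start, unsigned &End) {
  unsigned LSB = 0;
  while (!(Mask & (uint64_t(1) << LSB)))
    ++LSB;
  unsigned Length = 0;
  while (LSB + Length < 64 && (Mask & (uint64_t(1) << (LSB + Length))))
    ++Length;
  Start = 63 - (LSB + Length - 1);
  End = 63 - LSB;
}

int main() {
  unsigned Start, End;
  maskToStartEnd(0x00ffff00, Start, End); // bits 8..23 set
  assert(Start == 40 && End == 55);
  return 0;
}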
1986
1987unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
1988 SystemZII::FusedCompareType Type,
1989 const MachineInstr *MI) const {
1990 switch (Opcode) {
1991 case SystemZ::CHI:
1992 case SystemZ::CGHI:
1993 if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
1994 return 0;
1995 break;
1996 case SystemZ::CLFI:
1997 case SystemZ::CLGFI:
1998 if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
1999 return 0;
2000 break;
2001 case SystemZ::CL:
2002 case SystemZ::CLG:
2003 if (!STI.hasMiscellaneousExtensions())
2004 return 0;
2005 if (!(MI && MI->getOperand(3).getReg() == 0))
2006 return 0;
2007 break;
2008 }
2009 switch (Type) {
2010 case SystemZII::CompareAndBranch:
2011 switch (Opcode) {
2012 case SystemZ::CR:
2013 return SystemZ::CRJ;
2014 case SystemZ::CGR:
2015 return SystemZ::CGRJ;
2016 case SystemZ::CHI:
2017 return SystemZ::CIJ;
2018 case SystemZ::CGHI:
2019 return SystemZ::CGIJ;
2020 case SystemZ::CLR:
2021 return SystemZ::CLRJ;
2022 case SystemZ::CLGR:
2023 return SystemZ::CLGRJ;
2024 case SystemZ::CLFI:
2025 return SystemZ::CLIJ;
2026 case SystemZ::CLGFI:
2027 return SystemZ::CLGIJ;
2028 default:
2029 return 0;
2030 }
2031 case SystemZII::CompareAndReturn:
2032 switch (Opcode) {
2033 case SystemZ::CR:
2034 return SystemZ::CRBReturn;
2035 case SystemZ::CGR:
2036 return SystemZ::CGRBReturn;
2037 case SystemZ::CHI:
2038 return SystemZ::CIBReturn;
2039 case SystemZ::CGHI:
2040 return SystemZ::CGIBReturn;
2041 case SystemZ::CLR:
2042 return SystemZ::CLRBReturn;
2043 case SystemZ::CLGR:
2044 return SystemZ::CLGRBReturn;
2045 case SystemZ::CLFI:
2046 return SystemZ::CLIBReturn;
2047 case SystemZ::CLGFI:
2048 return SystemZ::CLGIBReturn;
2049 default:
2050 return 0;
2051 }
2052 case SystemZII::CompareAndSibcall:
2053 switch (Opcode) {
2054 case SystemZ::CR:
2055 return SystemZ::CRBCall;
2056 case SystemZ::CGR:
2057 return SystemZ::CGRBCall;
2058 case SystemZ::CHI:
2059 return SystemZ::CIBCall;
2060 case SystemZ::CGHI:
2061 return SystemZ::CGIBCall;
2062 case SystemZ::CLR:
2063 return SystemZ::CLRBCall;
2064 case SystemZ::CLGR:
2065 return SystemZ::CLGRBCall;
2066 case SystemZ::CLFI:
2067 return SystemZ::CLIBCall;
2068 case SystemZ::CLGFI:
2069 return SystemZ::CLGIBCall;
2070 default:
2071 return 0;
2072 }
2073 case SystemZII::CompareAndTrap:
2074 switch (Opcode) {
2075 case SystemZ::CR:
2076 return SystemZ::CRT;
2077 case SystemZ::CGR:
2078 return SystemZ::CGRT;
2079 case SystemZ::CHI:
2080 return SystemZ::CIT;
2081 case SystemZ::CGHI:
2082 return SystemZ::CGIT;
2083 case SystemZ::CLR:
2084 return SystemZ::CLRT;
2085 case SystemZ::CLGR:
2086 return SystemZ::CLGRT;
2087 case SystemZ::CLFI:
2088 return SystemZ::CLFIT;
2089 case SystemZ::CLGFI:
2090 return SystemZ::CLGIT;
2091 case SystemZ::CL:
2092 return SystemZ::CLT;
2093 case SystemZ::CLG:
2094 return SystemZ::CLGT;
2095 default:
2096 return 0;
2097 }
2098 }
2099 return 0;
2100}
2101
2102bool SystemZInstrInfo::
2103prepareCompareSwapOperands(MachineBasicBlock::iterator MBBI) const {
2104 assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
2105 MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
2106 "Not a compare reg/reg.");
2107
2108 MachineBasicBlock *MBB = MBBI->getParent();
2109 bool CCLive = true;
2110 SmallVector<MachineInstr *, 4> CCUsers;
2111 for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
2112 if (MI.readsRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2113 unsigned Flags = MI.getDesc().TSFlags;
2114 if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
2115 CCUsers.push_back(&MI);
2116 else
2117 return false;
2118 }
2119 if (MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2120 CCLive = false;
2121 break;
2122 }
2123 }
2124 if (CCLive) {
2125 LiveRegUnits LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo());
2126 LiveRegs.addLiveOuts(*MBB);
2127 if (!LiveRegs.available(SystemZ::CC))
2128 return false;
2129 }
2130
2131 // Update all CC users.
2132 for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
2133 unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
2134 unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
2135 0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
2136 MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
2137 unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
2138 CCMaskMO.setImm(NewCCMask);
2139 }
2140
2141 return true;
2142}
2143
2144unsigned SystemZ::reverseCCMask(unsigned CCMask) {
2145 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2146 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
2147 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
2148 (CCMask & SystemZ::CCMASK_CMP_UO));
2149}
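// [Editorial sketch - not part of SystemZInstrInfo.cpp] Reversing a CC mask
// for a swapped compare keeps the "equal" and "unordered" bits and exchanges
// the "less than" and "greater than" bits.  The numeric values below are
// illustrative stand-ins for the SystemZ CC-mask bits, not definitions taken
// from SystemZ.h.
#include <cassert>

static const unsigned CmpEQ = 8, CmpLT = 4, CmpGT = 2, CmpUO = 1;

static unsigned reverseMask(unsigned CCMask) {
  return (CCMask & CmpEQ) |
         ((CCMask & CmpGT) ? CmpLT : 0u) |
         ((CCMask & CmpLT) ? CmpGT : 0u) |
         (CCMask & CmpUO);
}

int main() {
  assert(reverseMask(CmpLT) == CmpGT);                    // "a < b" -> "b > a"
  assert(reverseMask(CmpEQ | CmpGT) == (CmpEQ | CmpLT));  // ">=" -> "<="
  return 0;
}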
2150
2151MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) {
2152 MachineFunction &MF = *MBB->getParent();
2153 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
2154 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
2155 return NewMBB;
2156}
2157
2158MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI,
2159 MachineBasicBlock *MBB) {
2160 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2161 NewMBB->splice(NewMBB->begin(), MBB,
2162 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2163 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2164 return NewMBB;
2165}
2166
2167MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI,
2168 MachineBasicBlock *MBB) {
2169 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2170 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2171 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2172 return NewMBB;
2173}
2174
2175unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
2176 if (!STI.hasLoadAndTrap())
2177 return 0;
2178 switch (Opcode) {
2179 case SystemZ::L:
2180 case SystemZ::LY:
2181 return SystemZ::LAT;
2182 case SystemZ::LG:
2183 return SystemZ::LGAT;
2184 case SystemZ::LFH:
2185 return SystemZ::LFHAT;
2186 case SystemZ::LLGF:
2187 return SystemZ::LLGFAT;
2188 case SystemZ::LLGT:
2189 return SystemZ::LLGTAT;
2190 }
2191 return 0;
2192}
2193
2194void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
2195 MachineBasicBlock::iterator MBBI,
2196 unsigned Reg, uint64_t Value) const {
2197 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
2198 unsigned Opcode = 0;
2199 if (isInt<16>(Value))
2200 Opcode = SystemZ::LGHI;
2201 else if (SystemZ::isImmLL(Value))
2202 Opcode = SystemZ::LLILL;
2203 else if (SystemZ::isImmLH(Value)) {
2204 Opcode = SystemZ::LLILH;
2205 Value >>= 16;
2206 }
2207 else if (isInt<32>(Value))
2208 Opcode = SystemZ::LGFI;
2209 if (Opcode) {
2210 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
2211 return;
2212 }
2213
2214 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2215 assert (MRI.isSSA() && "Huge values only handled before reg-alloc .");
2216 Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2217 Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2218 BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
2219 BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
2220 .addReg(Reg0).addImm(Value >> 32);
2221 BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
2222 .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
2223}
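// [Editorial sketch - not part of SystemZInstrInfo.cpp] When a 64-bit
// constant fits none of the single-instruction forms tried above, it is
// materialized as two 32-bit halves (the IIHF64/IILF64 pair).  The split is
// simply the following, shown here as a standalone C++ snippet:
#include <cassert>
#include <cstdint>

static void splitImmediate(uint64_t Value, uint32_t &High, uint32_t &Low) {
  High = uint32_t(Value >> 32);                       // inserted by IIHF
  Low = uint32_t(Value & ((uint64_t(1) << 32) - 1));  // inserted by IILF
}

int main() {
  uint32_t High, Low;
  splitImmediate(0x123456789abcdef0ULL, High, Low);
  assert(High == 0x12345678u && Low == 0x9abcdef0u);
  return 0;
}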
2224
2225bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI,
2226 StringRef &ErrInfo) const {
2227 const MCInstrDesc &MCID = MI.getDesc();
2228 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
2229 if (I >= MCID.getNumOperands())
2230 break;
2231 const MachineOperand &Op = MI.getOperand(I);
2232 const MCOperandInfo &MCOI = MCID.operands()[I];
2233 // Addressing modes have register and immediate operands. Op should be a
2234 // register (or frame index) operand if MCOI.RegClass contains a valid
2235 // register class, or an immediate otherwise.
2236 if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
2237 ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
2238 (MCOI.RegClass == -1 && !Op.isImm()))) {
2239 ErrInfo = "Addressing mode operands corrupt!";
2240 return false;
2241 }
2242 }
2243
2244 return true;
2245}
2246
2247bool SystemZInstrInfo::
2248areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2249 const MachineInstr &MIb) const {
2250
2251 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2252 return false;
2253
2254 // If mem-operands show that the same address Value is used by both
2255 // instructions, check for non-overlapping offsets and widths. Not
2256 // sure if a register based analysis would be an improvement...
2257
2258 MachineMemOperand *MMOa = *MIa.memoperands_begin();
2259 MachineMemOperand *MMOb = *MIb.memoperands_begin();
2260 const Value *VALa = MMOa->getValue();
2261 const Value *VALb = MMOb->getValue();
2262 bool SameVal = (VALa && VALb && (VALa == VALb));
2263 if (!SameVal) {
2264 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2265 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2266 if (PSVa && PSVb && (PSVa == PSVb))
2267 SameVal = true;
2268 }
2269 if (SameVal) {
2270 int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2271 LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2272 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2273 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2274 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2275 if (LowWidth.hasValue() &&
2276 LowOffset + (int)LowWidth.getValue() <= HighOffset)
2277 return true;
2278 }
2279
2280 return false;
2281}
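// [Editorial sketch - not part of SystemZInstrInfo.cpp] The disjointness test
// above reduces to interval arithmetic on (offset, width) pairs relative to
// the same base value: the lower access must end at or before the higher one
// starts.  In plain C++ (helper name is a placeholder):
#include <cassert>
#include <cstdint>

static bool accessesDisjoint(int64_t OffA, uint64_t WidthA,
                             int64_t OffB, uint64_t WidthB) {
  int64_t LowOff = OffA < OffB ? OffA : OffB;
  int64_t HighOff = OffA < OffB ? OffB : OffA;
  uint64_t LowWidth = (LowOff == OffA) ? WidthA : WidthB;
  return LowOff + int64_t(LowWidth) <= HighOff;
}

int main() {
  assert(accessesDisjoint(0, 8, 8, 8));   // [0,8) and [8,16) do not overlap
  assert(!accessesDisjoint(0, 8, 4, 4));  // [0,8) and [4,8) overlap
  return 0;
}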
2282
2283bool SystemZInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
2284 const Register Reg,
2285 int64_t &ImmVal) const {
2286
2287 if (MI.getOpcode() == SystemZ::VGBM && Reg == MI.getOperand(0).getReg()) {
2288 ImmVal = MI.getOperand(1).getImm();
2289 // TODO: Handle non-0 values
2290 return ImmVal == 0;
2291 }
2292
2293 return false;
2294}