SystemZInstrInfo.cpp
1//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the SystemZ implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SystemZInstrInfo.h"
15#include "SystemZ.h"
16#include "SystemZInstrBuilder.h"
17#include "SystemZSubtarget.h"
18#include "llvm/ADT/Statistic.h"
35#include "llvm/MC/MCInstrDesc.h"
41#include <cassert>
42#include <cstdint>
43#include <iterator>
44
45using namespace llvm;
46
47#define GET_INSTRINFO_CTOR_DTOR
48#define GET_INSTRMAP_INFO
49#include "SystemZGenInstrInfo.inc"
50
51#define DEBUG_TYPE "systemz-II"
52
53// Return a mask with Count low bits set.
54static uint64_t allOnes(unsigned int Count) {
55 return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
56}
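// For example: allOnes(0) == 0, allOnes(8) == 0xff and allOnes(64) is all
// ones. The double shift (1 << (Count - 1) << 1) avoids shifting a 64-bit
// value by 64 bits, which would be undefined behaviour for Count == 64.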
57
58// Pin the vtable to this file.
59void SystemZInstrInfo::anchor() {}
60
61SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
62 : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
63 RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
64 STI(sti) {}
65
66// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
67// each having the opcode given by NewOpcode.
68void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
69 unsigned NewOpcode) const {
70 MachineBasicBlock *MBB = MI->getParent();
71 MachineFunction &MF = *MBB->getParent();
72
73 // Get two load or store instructions. Use the original instruction for one
74 // of them (arbitrarily the second here) and create a clone for the other.
75 MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
76 MBB->insert(MI, EarlierMI);
77
78 // Set up the two 64-bit registers and remember super reg and its flags.
79 MachineOperand &HighRegOp = EarlierMI->getOperand(0);
80 MachineOperand &LowRegOp = MI->getOperand(0);
81 Register Reg128 = LowRegOp.getReg();
82 unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
83 unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
84 HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
85 LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
86
87 if (MI->mayStore()) {
88 // Add implicit uses of the super register in case one of the subregs is
89 // undefined. We could track liveness and skip storing an undefined
90 // subreg, but this is hopefully rare (discovered with llvm-stress).
91 // If Reg128 was killed, set kill flag on MI.
92 unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
93 MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
94 MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
95 }
96
97 // The address in the first (high) instruction is already correct.
98 // Adjust the offset in the second (low) instruction.
99 MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
100 MachineOperand &LowOffsetOp = MI->getOperand(2);
101 LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
102
103 // Clear the kill flags on the registers in the first instruction.
104 if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
105 EarlierMI->getOperand(0).setIsKill(false);
106 EarlierMI->getOperand(1).setIsKill(false);
107 EarlierMI->getOperand(3).setIsKill(false);
108
109 // Set the opcodes.
110 unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
111 unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
112 assert(HighOpcode && LowOpcode && "Both offsets should be in range");
113
114 EarlierMI->setDesc(get(HighOpcode));
115 MI->setDesc(get(LowOpcode));
116}
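// For example (illustrative operands): an ST128 of a 128-bit GR pair at
// 160(%r15) with NewOpcode == SystemZ::STG becomes two stores,
//   STG <high 64-bit subreg>, 160(%r15)
//   STG <low 64-bit subreg>, 168(%r15)
// i.e. the clone keeps the original address and the original MI gets the
// offset bumped by 8 and the low subregister.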
117
118// Split ADJDYNALLOC instruction MI.
119void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
120 MachineBasicBlock *MBB = MI->getParent();
121 MachineFunction &MF = *MBB->getParent();
122 MachineFrameInfo &MFFrame = MF.getFrameInfo();
123 MachineOperand &OffsetMO = MI->getOperand(2);
124 auto *Regs = STI.getSpecialRegisters();
125
126 uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
127 Regs->getCallFrameSize() +
128 Regs->getStackPointerBias() +
129 OffsetMO.getImm());
130 unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
131 assert(NewOpcode && "No support for huge argument lists yet");
132 MI->setDesc(get(NewOpcode));
133 OffsetMO.setImm(Offset);
134}
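// For example (made-up numbers): with MaxCallFrameSize = 160, a call frame
// size of 160, a stack pointer bias of 0 and an ADJDYNALLOC immediate of 8,
// Offset becomes 328, which fits LA's unsigned 12-bit displacement, so the
// pseudo is simply rewritten as LA with that displacement.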
135
136// MI is an RI-style pseudo instruction. Replace it with LowOpcode
137// if the first operand is a low GR32 and HighOpcode if the first operand
138// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
139// and HighOpcode takes an unsigned 32-bit operand. In those cases,
140// MI has the same kind of operand as LowOpcode, so needs to be converted
141// if HighOpcode is used.
142void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
143 unsigned HighOpcode,
144 bool ConvertHigh) const {
145 Register Reg = MI.getOperand(0).getReg();
146 bool IsHigh = SystemZ::isHighReg(Reg);
147 MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
148 if (IsHigh && ConvertHigh)
149 MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
150}
151
152// MI is a three-operand RIE-style pseudo instruction. Replace it with
153// LowOpcodeK if the registers are both low GR32s, otherwise use a move
154// followed by HighOpcode or LowOpcode, depending on whether the target
155// is a high or low GR32.
156void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
157 unsigned LowOpcodeK,
158 unsigned HighOpcode) const {
159 Register DestReg = MI.getOperand(0).getReg();
160 Register SrcReg = MI.getOperand(1).getReg();
161 bool DestIsHigh = SystemZ::isHighReg(DestReg);
162 bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
163 if (!DestIsHigh && !SrcIsHigh)
164 MI.setDesc(get(LowOpcodeK));
165 else {
166 if (DestReg != SrcReg) {
167 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
168 SystemZ::LR, 32, MI.getOperand(1).isKill(),
169 MI.getOperand(1).isUndef());
170 MI.getOperand(1).setReg(DestReg);
171 }
172 MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
173 MI.tieOperands(0, 1);
174 }
175}
176
177// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
178// if the first operand is a low GR32 and HighOpcode if the first operand
179// is a high GR32.
180void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
181 unsigned HighOpcode) const {
182 Register Reg = MI.getOperand(0).getReg();
183 unsigned Opcode = getOpcodeForOffset(
184 SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
185 MI.getOperand(2).getImm());
186 MI.setDesc(get(Opcode));
187}
188
189// MI is a load-on-condition pseudo instruction with a single register
190// (source or destination) operand. Replace it with LowOpcode if the
191// register is a low GR32 and HighOpcode if the register is a high GR32.
192void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
193 unsigned HighOpcode) const {
194 Register Reg = MI.getOperand(0).getReg();
195 unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
196 MI.setDesc(get(Opcode));
197}
198
199// MI is an RR-style pseudo instruction that zero-extends the low Size bits
200// of one GRX32 into another. Replace it with LowOpcode if both operands
201// are low registers, otherwise use RISB[LH]G.
202void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
203 unsigned Size) const {
204 MachineInstrBuilder MIB =
205 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
206 MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
207 Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());
208
209 // Keep the remaining operands as-is.
210 for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
211 MIB.add(MO);
212
213 MI.eraseFromParent();
214}
215
216void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
217 MachineBasicBlock *MBB = MI->getParent();
218 MachineFunction &MF = *MBB->getParent();
219 const Register Reg64 = MI->getOperand(0).getReg();
220 const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);
221
222 // EAR can only load the low subregister so use a shift for %a0 to produce
223 // the GR containing %a0 and %a1.
224
225 // ear <reg>, %a0
226 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
227 .addReg(SystemZ::A0)
228 .addReg(Reg64, RegState::ImplicitDefine);
229
230 // sllg <reg>, <reg>, 32
231 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
232 .addReg(Reg64)
233 .addReg(0)
234 .addImm(32);
235
236 // ear <reg>, %a1
237 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
238 .addReg(SystemZ::A1);
239
240 // lg <reg>, 40(<reg>)
241 MI->setDesc(get(SystemZ::LG));
242 MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
243}
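// The sequence above first assembles the 64-bit thread pointer from access
// registers %a0 (high half) and %a1 (low half), then replaces the original
// pseudo with LG <reg>, 40(<reg>); offset 40 is where the stack guard value
// is assumed to live relative to the thread pointer on SystemZ Linux.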
244
245// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
246// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
247// are low registers, otherwise use RISB[LH]G. Size is the number of bits
248// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
249// KillSrc is true if this move is the last use of SrcReg.
250MachineInstrBuilder
251SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
252 MachineBasicBlock::iterator MBBI,
253 const DebugLoc &DL, unsigned DestReg,
254 unsigned SrcReg, unsigned LowLowOpcode,
255 unsigned Size, bool KillSrc,
256 bool UndefSrc) const {
257 unsigned Opcode;
258 bool DestIsHigh = SystemZ::isHighReg(DestReg);
259 bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
260 if (DestIsHigh && SrcIsHigh)
261 Opcode = SystemZ::RISBHH;
262 else if (DestIsHigh && !SrcIsHigh)
263 Opcode = SystemZ::RISBHL;
264 else if (!DestIsHigh && SrcIsHigh)
265 Opcode = SystemZ::RISBLH;
266 else {
267 return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
268 .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
269 }
270 unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
271 return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
272 .addReg(DestReg, RegState::Undef)
273 .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
274 .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
275}
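// For example, zero-extending the low 8 bits of a low GR32 into a high GR32
// (Size == 8) selects RISBHL with operands I3 = 32 - 8 = 24, I4 = 128 + 31
// (bit 31 plus the "zero remaining bits" flag) and a rotate of 32, since the
// source and destination halves differ.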
276
277MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
278 bool NewMI,
279 unsigned OpIdx1,
280 unsigned OpIdx2) const {
281 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
282 if (NewMI)
283 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
284 return MI;
285 };
286
287 switch (MI.getOpcode()) {
288 case SystemZ::SELRMux:
289 case SystemZ::SELFHR:
290 case SystemZ::SELR:
291 case SystemZ::SELGR:
292 case SystemZ::LOCRMux:
293 case SystemZ::LOCFHR:
294 case SystemZ::LOCR:
295 case SystemZ::LOCGR: {
296 auto &WorkingMI = cloneIfNew(MI);
297 // Invert condition.
298 unsigned CCValid = WorkingMI.getOperand(3).getImm();
299 unsigned CCMask = WorkingMI.getOperand(4).getImm();
300 WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
301 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
302 OpIdx1, OpIdx2);
303 }
304 default:
305 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
306 }
307}
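// Note on the SELcc/LOCcc cases above: swapping the two value operands of a
// select or load-on-condition only preserves semantics if the condition is
// inverted at the same time, which is why the CC mask is XORed with CCValid
// before delegating to the generic operand swap.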
308
309// If MI is a simple load or store for a frame object, return the register
310// it loads or stores and set FrameIndex to the index of the frame object.
311// Return 0 otherwise.
312//
313// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
314static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
315 unsigned Flag) {
316 const MCInstrDesc &MCID = MI.getDesc();
317 if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
318 MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
319 FrameIndex = MI.getOperand(1).getIndex();
320 return MI.getOperand(0).getReg();
321 }
322 return 0;
323}
324
325unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
326 int &FrameIndex) const {
327 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
328}
329
330unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
331 int &FrameIndex) const {
332 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
333}
334
335bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
336 int &DestFrameIndex,
337 int &SrcFrameIndex) const {
338 // Check for MVC 0(Length,FI1),0(FI2)
339 const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
340 if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
341 MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
342 MI.getOperand(4).getImm() != 0)
343 return false;
344
345 // Check that Length covers the full slots.
346 int64_t Length = MI.getOperand(2).getImm();
347 unsigned FI1 = MI.getOperand(0).getIndex();
348 unsigned FI2 = MI.getOperand(3).getIndex();
349 if (MFI.getObjectSize(FI1) != Length ||
350 MFI.getObjectSize(FI2) != Length)
351 return false;
352
353 DestFrameIndex = FI1;
354 SrcFrameIndex = FI2;
355 return true;
356}
357
358bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
359 MachineBasicBlock *&TBB,
360 MachineBasicBlock *&FBB,
361 SmallVectorImpl<MachineOperand> &Cond,
362 bool AllowModify) const {
363 // Most of the code and comments here are boilerplate.
364
365 // Start from the bottom of the block and work up, examining the
366 // terminator instructions.
367 MachineBasicBlock::iterator I = MBB.end();
368 while (I != MBB.begin()) {
369 --I;
370 if (I->isDebugInstr())
371 continue;
372
373 // Working from the bottom, when we see a non-terminator instruction, we're
374 // done.
375 if (!isUnpredicatedTerminator(*I))
376 break;
377
378 // A terminator that isn't a branch can't easily be handled by this
379 // analysis.
380 if (!I->isBranch())
381 return true;
382
383 // Can't handle indirect branches.
384 SystemZII::Branch Branch(getBranchInfo(*I));
385 if (!Branch.hasMBBTarget())
386 return true;
387
388 // Punt on compound branches.
389 if (Branch.Type != SystemZII::BranchNormal)
390 return true;
391
392 if (Branch.CCMask == SystemZ::CCMASK_ANY) {
393 // Handle unconditional branches.
394 if (!AllowModify) {
395 TBB = Branch.getMBBTarget();
396 continue;
397 }
398
399 // If the block has any instructions after a JMP, delete them.
400 MBB.erase(std::next(I), MBB.end());
401
402 Cond.clear();
403 FBB = nullptr;
404
405 // Delete the JMP if it's equivalent to a fall-through.
406 if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
407 TBB = nullptr;
408 I->eraseFromParent();
409 I = MBB.end();
410 continue;
411 }
412
413 // TBB is used to indicate the unconditional destination.
414 TBB = Branch.getMBBTarget();
415 continue;
416 }
417
418 // Working from the bottom, handle the first conditional branch.
419 if (Cond.empty()) {
420 // FIXME: add X86-style branch swap
421 FBB = TBB;
422 TBB = Branch.getMBBTarget();
423 Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
424 Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
425 continue;
426 }
427
428 // Handle subsequent conditional branches.
429 assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
430
431 // Only handle the case where all conditional branches branch to the same
432 // destination.
433 if (TBB != Branch.getMBBTarget())
434 return true;
435
436 // If the conditions are the same, we can leave them alone.
437 unsigned OldCCValid = Cond[0].getImm();
438 unsigned OldCCMask = Cond[1].getImm();
439 if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
440 continue;
441
442 // FIXME: Try combining conditions like X86 does. Should be easy on Z!
443 return false;
444 }
445
446 return false;
447}
448
449unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
450 int *BytesRemoved) const {
451 assert(!BytesRemoved && "code size not handled");
452
453 // Most of the code and comments here are boilerplate.
454 MachineBasicBlock::iterator I = MBB.end();
455 unsigned Count = 0;
456
457 while (I != MBB.begin()) {
458 --I;
459 if (I->isDebugInstr())
460 continue;
461 if (!I->isBranch())
462 break;
463 if (!getBranchInfo(*I).hasMBBTarget())
464 break;
465 // Remove the branch.
466 I->eraseFromParent();
467 I = MBB.end();
468 ++Count;
469 }
470
471 return Count;
472}
473
474bool SystemZInstrInfo::
475reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
476 assert(Cond.size() == 2 && "Invalid condition");
477 Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
478 return false;
479}
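// For example, with CCValid == SystemZ::CCMASK_ICMP (0b1110) a mask of
// CCMASK_CMP_EQ (0b1000) becomes 0b0110 == CCMASK_CMP_NE, i.e. the branch
// condition is logically negated within the valid CC values.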
480
481unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
482 MachineBasicBlock *TBB,
483 MachineBasicBlock *FBB,
484 ArrayRef<MachineOperand> Cond,
485 const DebugLoc &DL,
486 int *BytesAdded) const {
487 // In this function we output 32-bit branches, which should always
488 // have enough range. They can be shortened and relaxed by later code
489 // in the pipeline, if desired.
490
491 // Shouldn't be a fall through.
492 assert(TBB && "insertBranch must not be told to insert a fallthrough");
493 assert((Cond.size() == 2 || Cond.size() == 0) &&
494 "SystemZ branch conditions have one component!");
495 assert(!BytesAdded && "code size not handled");
496
497 if (Cond.empty()) {
498 // Unconditional branch?
499 assert(!FBB && "Unconditional branch with multiple successors!");
500 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
501 return 1;
502 }
503
504 // Conditional branch.
505 unsigned Count = 0;
506 unsigned CCValid = Cond[0].getImm();
507 unsigned CCMask = Cond[1].getImm();
508 BuildMI(&MBB, DL, get(SystemZ::BRC))
509 .addImm(CCValid).addImm(CCMask).addMBB(TBB);
510 ++Count;
511
512 if (FBB) {
513 // Two-way Conditional branch. Insert the second branch.
514 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
515 ++Count;
516 }
517 return Count;
518}
519
520bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
521 Register &SrcReg2, int64_t &Mask,
522 int64_t &Value) const {
523 assert(MI.isCompare() && "Caller should have checked for a comparison");
524
525 if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
526 MI.getOperand(1).isImm()) {
527 SrcReg = MI.getOperand(0).getReg();
528 SrcReg2 = 0;
529 Value = MI.getOperand(1).getImm();
530 Mask = ~0;
531 return true;
532 }
533
534 return false;
535}
536
537bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
538 ArrayRef<MachineOperand> Pred,
539 Register DstReg, Register TrueReg,
540 Register FalseReg, int &CondCycles,
541 int &TrueCycles,
542 int &FalseCycles) const {
543 // Not all subtargets have LOCR instructions.
544 if (!STI.hasLoadStoreOnCond())
545 return false;
546 if (Pred.size() != 2)
547 return false;
548
549 // Check register classes.
550 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
551 const TargetRegisterClass *RC =
552 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
553 if (!RC)
554 return false;
555
556 // We have LOCR instructions for 32 and 64 bit general purpose registers.
557 if ((STI.hasLoadStoreOnCond2() &&
558 SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
559 SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
560 SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
561 CondCycles = 2;
562 TrueCycles = 2;
563 FalseCycles = 2;
564 return true;
565 }
566
567 // Can't do anything else.
568 return false;
569}
570
571void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
572 MachineBasicBlock::iterator I,
573 const DebugLoc &DL, Register DstReg,
574 ArrayRef<MachineOperand> Pred,
575 Register TrueReg,
576 Register FalseReg) const {
577 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
578 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
579
580 assert(Pred.size() == 2 && "Invalid condition");
581 unsigned CCValid = Pred[0].getImm();
582 unsigned CCMask = Pred[1].getImm();
583
584 unsigned Opc;
585 if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
586 if (STI.hasMiscellaneousExtensions3())
587 Opc = SystemZ::SELRMux;
588 else if (STI.hasLoadStoreOnCond2())
589 Opc = SystemZ::LOCRMux;
590 else {
591 Opc = SystemZ::LOCR;
592 MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
593 Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
594 Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
595 BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
596 BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
597 TrueReg = TReg;
598 FalseReg = FReg;
599 }
600 } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
601 if (STI.hasMiscellaneousExtensions3())
602 Opc = SystemZ::SELGR;
603 else
604 Opc = SystemZ::LOCGR;
605 } else
606 llvm_unreachable("Invalid register class");
607
608 BuildMI(MBB, I, DL, get(Opc), DstReg)
609 .addReg(FalseReg).addReg(TrueReg)
610 .addImm(CCValid).addImm(CCMask);
611}
612
613bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
614 Register Reg,
615 MachineRegisterInfo *MRI) const {
616 unsigned DefOpc = DefMI.getOpcode();
617 if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
618 DefOpc != SystemZ::LGHI)
619 return false;
620 if (DefMI.getOperand(0).getReg() != Reg)
621 return false;
622 int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();
623
624 unsigned UseOpc = UseMI.getOpcode();
625 unsigned NewUseOpc;
626 unsigned UseIdx;
627 int CommuteIdx = -1;
628 bool TieOps = false;
629 switch (UseOpc) {
630 case SystemZ::SELRMux:
631 TieOps = true;
632 [[fallthrough]];
633 case SystemZ::LOCRMux:
634 if (!STI.hasLoadStoreOnCond2())
635 return false;
636 NewUseOpc = SystemZ::LOCHIMux;
637 if (UseMI.getOperand(2).getReg() == Reg)
638 UseIdx = 2;
639 else if (UseMI.getOperand(1).getReg() == Reg)
640 UseIdx = 2, CommuteIdx = 1;
641 else
642 return false;
643 break;
644 case SystemZ::SELGR:
645 TieOps = true;
646 [[fallthrough]];
647 case SystemZ::LOCGR:
648 if (!STI.hasLoadStoreOnCond2())
649 return false;
650 NewUseOpc = SystemZ::LOCGHI;
651 if (UseMI.getOperand(2).getReg() == Reg)
652 UseIdx = 2;
653 else if (UseMI.getOperand(1).getReg() == Reg)
654 UseIdx = 2, CommuteIdx = 1;
655 else
656 return false;
657 break;
658 default:
659 return false;
660 }
661
662 if (CommuteIdx != -1)
663 if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
664 return false;
665
666 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
667 UseMI.setDesc(get(NewUseOpc));
668 if (TieOps)
669 UseMI.tieOperands(0, 1);
670 UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
671 if (DeleteDef)
672 DefMI.eraseFromParent();
673
674 return true;
675}
676
677bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
678 unsigned Opcode = MI.getOpcode();
679 if (Opcode == SystemZ::Return ||
680 Opcode == SystemZ::Return_XPLINK ||
681 Opcode == SystemZ::Trap ||
682 Opcode == SystemZ::CallJG ||
683 Opcode == SystemZ::CallBR)
684 return true;
685 return false;
686}
687
688bool SystemZInstrInfo::
689isProfitableToIfCvt(MachineBasicBlock &MBB,
690 unsigned NumCycles, unsigned ExtraPredCycles,
691 BranchProbability Probability) const {
692 // Avoid using conditional returns at the end of a loop (since then
693 // we'd need to emit an unconditional branch to the beginning anyway,
694 // making the loop body longer). This doesn't apply for low-probability
695 // loops (eg. compare-and-swap retry), so just decide based on branch
696 // probability instead of looping structure.
697 // However, since Compare and Trap instructions cost the same as a regular
698 // Compare instruction, we should allow the if conversion to convert this
699 // into a Conditional Compare regardless of the branch probability.
700 if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
701 MBB.succ_empty() && Probability < BranchProbability(1, 8))
702 return false;
703 // For now only convert single instructions.
704 return NumCycles == 1;
705}
706
707bool SystemZInstrInfo::
708isProfitableToIfCvt(MachineBasicBlock &TMBB,
709 unsigned NumCyclesT, unsigned ExtraPredCyclesT,
710 MachineBasicBlock &FMBB,
711 unsigned NumCyclesF, unsigned ExtraPredCyclesF,
712 BranchProbability Probability) const {
713 // For now avoid converting mutually-exclusive cases.
714 return false;
715}
716
717bool SystemZInstrInfo::
718isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
719 BranchProbability Probability) const {
720 // For now only duplicate single instructions.
721 return NumCycles == 1;
722}
723
724bool SystemZInstrInfo::PredicateInstruction(
725 MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
726 assert(Pred.size() == 2 && "Invalid condition");
727 unsigned CCValid = Pred[0].getImm();
728 unsigned CCMask = Pred[1].getImm();
729 assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
730 unsigned Opcode = MI.getOpcode();
731 if (Opcode == SystemZ::Trap) {
732 MI.setDesc(get(SystemZ::CondTrap));
733 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
734 .addImm(CCValid).addImm(CCMask)
735 .addReg(SystemZ::CC, RegState::Implicit);
736 return true;
737 }
738 if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
739 MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
740 : SystemZ::CondReturn_XPLINK));
741 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
742 .addImm(CCValid)
743 .addImm(CCMask)
744 .addReg(SystemZ::CC, RegState::Implicit);
745 return true;
746 }
747 if (Opcode == SystemZ::CallJG) {
748 MachineOperand FirstOp = MI.getOperand(0);
749 const uint32_t *RegMask = MI.getOperand(1).getRegMask();
750 MI.removeOperand(1);
751 MI.removeOperand(0);
752 MI.setDesc(get(SystemZ::CallBRCL));
753 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
754 .addImm(CCValid)
755 .addImm(CCMask)
756 .add(FirstOp)
757 .addRegMask(RegMask)
758 .addReg(SystemZ::CC, RegState::Implicit);
759 return true;
760 }
761 if (Opcode == SystemZ::CallBR) {
762 MachineOperand Target = MI.getOperand(0);
763 const uint32_t *RegMask = MI.getOperand(1).getRegMask();
764 MI.removeOperand(1);
765 MI.removeOperand(0);
766 MI.setDesc(get(SystemZ::CallBCR));
767 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
768 .addImm(CCValid).addImm(CCMask)
769 .add(Target)
770 .addRegMask(RegMask)
771 .addReg(SystemZ::CC, RegState::Implicit);
772 return true;
773 }
774 return false;
775}
776
777void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
778 MachineBasicBlock::iterator MBBI,
779 const DebugLoc &DL, MCRegister DestReg,
780 MCRegister SrcReg, bool KillSrc) const {
781 // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
782 // super register in case one of the subregs is undefined.
783 // This handles ADDR128 too.
784 if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
785 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
786 RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
787 MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
788 .addReg(SrcReg, RegState::Implicit);
789 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
790 RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
791 MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
792 .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
793 return;
794 }
795
796 if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
797 emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
798 false);
799 return;
800 }
801
802 // Move 128-bit floating-point values between VR128 and FP128.
803 if (SystemZ::VR128BitRegClass.contains(DestReg) &&
804 SystemZ::FP128BitRegClass.contains(SrcReg)) {
805 MCRegister SrcRegHi =
806 RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
807 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
808 MCRegister SrcRegLo =
809 RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
810 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
811
812 BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
813 .addReg(SrcRegHi, getKillRegState(KillSrc))
814 .addReg(SrcRegLo, getKillRegState(KillSrc));
815 return;
816 }
817 if (SystemZ::FP128BitRegClass.contains(DestReg) &&
818 SystemZ::VR128BitRegClass.contains(SrcReg)) {
819 MCRegister DestRegHi =
820 RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
821 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
822 MCRegister DestRegLo =
823 RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
824 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
825
826 if (DestRegHi != SrcReg)
827 copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
828 BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
829 .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
830 return;
831 }
832
833 // Move CC value from a GR32.
834 if (DestReg == SystemZ::CC) {
835 unsigned Opcode =
836 SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
837 BuildMI(MBB, MBBI, DL, get(Opcode))
838 .addReg(SrcReg, getKillRegState(KillSrc))
839 .addImm(3 << (SystemZ::IPM_CC - 16));
840 return;
841 }
842
843 // Everything else needs only one instruction.
844 unsigned Opcode;
845 if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
846 Opcode = SystemZ::LGR;
847 else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
848 // For z13 we prefer LDR over LER to avoid partial register dependencies.
849 Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
850 else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
851 Opcode = SystemZ::LDR;
852 else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
853 Opcode = SystemZ::LXR;
854 else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
855 Opcode = SystemZ::VLR32;
856 else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
857 Opcode = SystemZ::VLR64;
858 else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
859 Opcode = SystemZ::VLR;
860 else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
861 Opcode = SystemZ::CPYA;
862 else
863 llvm_unreachable("Impossible reg-to-reg copy");
864
865 BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
866 .addReg(SrcReg, getKillRegState(KillSrc));
867}
868
869void SystemZInstrInfo::storeRegToStackSlot(
870 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
871 bool isKill, int FrameIdx, const TargetRegisterClass *RC,
872 const TargetRegisterInfo *TRI, Register VReg) const {
873 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
874
875 // Callers may expect a single instruction, so keep 128-bit moves
876 // together for now and lower them after register allocation.
877 unsigned LoadOpcode, StoreOpcode;
878 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
879 addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
880 .addReg(SrcReg, getKillRegState(isKill)),
881 FrameIdx);
882}
883
884void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
885 MachineBasicBlock::iterator MBBI,
886 Register DestReg, int FrameIdx,
887 const TargetRegisterClass *RC,
888 const TargetRegisterInfo *TRI,
889 Register VReg) const {
890 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
891
892 // Callers may expect a single instruction, so keep 128-bit moves
893 // together for now and lower them after register allocation.
894 unsigned LoadOpcode, StoreOpcode;
895 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
896 addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
897 FrameIdx);
898}
899
900// Return true if MI is a simple load or store with a 12-bit displacement
901// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
902static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
903 const MCInstrDesc &MCID = MI->getDesc();
904 return ((MCID.TSFlags & Flag) &&
905 isUInt<12>(MI->getOperand(2).getImm()) &&
906 MI->getOperand(3).getReg() == 0);
907}
908
909namespace {
910
911struct LogicOp {
912 LogicOp() = default;
913 LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
914 : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
915
916 explicit operator bool() const { return RegSize; }
917
918 unsigned RegSize = 0;
919 unsigned ImmLSB = 0;
920 unsigned ImmSize = 0;
921};
922
923} // end anonymous namespace
924
925static LogicOp interpretAndImmediate(unsigned Opcode) {
926 switch (Opcode) {
927 case SystemZ::NILMux: return LogicOp(32, 0, 16);
928 case SystemZ::NIHMux: return LogicOp(32, 16, 16);
929 case SystemZ::NILL64: return LogicOp(64, 0, 16);
930 case SystemZ::NILH64: return LogicOp(64, 16, 16);
931 case SystemZ::NIHL64: return LogicOp(64, 32, 16);
932 case SystemZ::NIHH64: return LogicOp(64, 48, 16);
933 case SystemZ::NIFMux: return LogicOp(32, 0, 32);
934 case SystemZ::NILF64: return LogicOp(64, 0, 32);
935 case SystemZ::NIHF64: return LogicOp(64, 32, 32);
936 default: return LogicOp();
937 }
938}
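// Each entry above records which bits of the register an AND-immediate
// pseudo actually touches. For example NILL64 is a 64-bit AND whose 16-bit
// immediate applies to the low 16 bits of the register (LogicOp(64, 0, 16));
// all higher bits are left unchanged and are therefore treated as ones when
// the full mask is reconstructed in convertToThreeAddress below.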
939
940static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
941 if (OldMI->registerDefIsDead(SystemZ::CC)) {
942 MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
943 if (CCDef != nullptr)
944 CCDef->setIsDead(true);
945 }
946}
947
948static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
949 MachineInstr::MIFlag Flag) {
950 if (OldMI->getFlag(Flag))
951 NewMI->setFlag(Flag);
952}
953
954MachineInstr *SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI,
955 LiveVariables *LV,
956 LiveIntervals *LIS) const {
957 MachineBasicBlock *MBB = MI.getParent();
958
959 // Try to convert an AND into an RISBG-type instruction.
960 // TODO: It might be beneficial to select RISBG and shorten to AND instead.
961 if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
962 uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
963 // AND IMMEDIATE leaves the other bits of the register unchanged.
964 Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
965 unsigned Start, End;
966 if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
967 unsigned NewOpcode;
968 if (And.RegSize == 64) {
969 NewOpcode = SystemZ::RISBG;
970 // Prefer RISBGN if available, since it does not clobber CC.
971 if (STI.hasMiscellaneousExtensions())
972 NewOpcode = SystemZ::RISBGN;
973 } else {
974 NewOpcode = SystemZ::RISBMux;
975 Start &= 31;
976 End &= 31;
977 }
978 MachineOperand &Dest = MI.getOperand(0);
979 MachineOperand &Src = MI.getOperand(1);
980 MachineInstrBuilder MIB =
981 BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
982 .add(Dest)
983 .addReg(0)
984 .addReg(Src.getReg(), getKillRegState(Src.isKill()),
985 Src.getSubReg())
986 .addImm(Start)
987 .addImm(End + 128)
988 .addImm(0);
989 if (LV) {
990 unsigned NumOps = MI.getNumOperands();
991 for (unsigned I = 1; I < NumOps; ++I) {
992 MachineOperand &Op = MI.getOperand(I);
993 if (Op.isReg() && Op.isKill())
994 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
995 }
996 }
997 if (LIS)
998 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
999 transferDeadCC(&MI, MIB);
1000 return MIB;
1001 }
1002 }
1003 return nullptr;
1004}
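// Worked example (illustrative operands): NIFMux %r2, 0xffffff00 gives
// Imm == 0xffffff00, which isRxSBGMask recognises as a contiguous mask with
// Start == 32 and End == 55; since RegSize is 32 the opcode becomes RISBMux
// and the masked values Start & 31 == 0 and End & 31 == 23 are emitted, with
// End + 128 setting the "zero remaining bits" flag.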
1005
1006MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1007 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1008 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1009 LiveIntervals *LIS, VirtRegMap *VRM) const {
1010 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1011 MachineRegisterInfo &MRI = MF.getRegInfo();
1012 const MachineFrameInfo &MFI = MF.getFrameInfo();
1013 unsigned Size = MFI.getObjectSize(FrameIndex);
1014 unsigned Opcode = MI.getOpcode();
1015
1016 // Check CC liveness if new instruction introduces a dead def of CC.
1017 MCRegUnitIterator CCUnit(MCRegister::from(SystemZ::CC), TRI);
1018 SlotIndex MISlot = SlotIndex();
1019 LiveRange *CCLiveRange = nullptr;
1020 bool CCLiveAtMI = true;
1021 if (LIS) {
1022 MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1023 CCLiveRange = &LIS->getRegUnit(*CCUnit);
1024 CCLiveAtMI = CCLiveRange->liveAt(MISlot);
1025 }
1026 ++CCUnit;
1027 assert(!CCUnit.isValid() && "CC only has one reg unit.");
1028
1029 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1030 if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1031 isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1032 // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1033 MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1034 MI.getDebugLoc(), get(SystemZ::AGSI))
1035 .addFrameIndex(FrameIndex)
1036 .addImm(0)
1037 .addImm(MI.getOperand(2).getImm());
1038 BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
1039 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1040 return BuiltMI;
1041 }
1042 return nullptr;
1043 }
1044
1045 // All other cases require a single operand.
1046 if (Ops.size() != 1)
1047 return nullptr;
1048
1049 unsigned OpNum = Ops[0];
1050 assert(Size * 8 ==
1051 TRI->getRegSizeInBits(*MF.getRegInfo()
1052 .getRegClass(MI.getOperand(OpNum).getReg())) &&
1053 "Invalid size combination");
1054
1055 if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1056 isInt<8>(MI.getOperand(2).getImm())) {
1057 // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1058 Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1059 MachineInstr *BuiltMI =
1060 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1061 .addFrameIndex(FrameIndex)
1062 .addImm(0)
1063 .addImm(MI.getOperand(2).getImm());
1064 transferDeadCC(&MI, BuiltMI);
1065 transferMIFlag(&MI, BuiltMI, MachineInstr::NoSWrap);
1066 return BuiltMI;
1067 }
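// For example (illustrative registers): folding the spill of %r2 in
// "AHI %r2, 1" against frame index FI produces "ASI <FI>, 0, 1", i.e. the
// register add-immediate becomes an add-immediate directly on the spill slot,
// provided the immediate fits in a signed 8-bit field.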
1068
1069 if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1070 isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1071 (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1072 isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1073 // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1074 Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1075 MachineInstr *BuiltMI =
1076 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1077 .addFrameIndex(FrameIndex)
1078 .addImm(0)
1079 .addImm((int8_t)MI.getOperand(2).getImm());
1080 transferDeadCC(&MI, BuiltMI);
1081 return BuiltMI;
1082 }
1083
1084 if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1085 isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1086 (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1087 isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1088 // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1089 Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1090 MachineInstr *BuiltMI =
1091 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1092 .addFrameIndex(FrameIndex)
1093 .addImm(0)
1094 .addImm((int8_t)-MI.getOperand(2).getImm());
1095 transferDeadCC(&MI, BuiltMI);
1096 return BuiltMI;
1097 }
1098
1099 unsigned MemImmOpc = 0;
1100 switch (Opcode) {
1101 case SystemZ::LHIMux:
1102 case SystemZ::LHI: MemImmOpc = SystemZ::MVHI; break;
1103 case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break;
1104 case SystemZ::CHIMux:
1105 case SystemZ::CHI: MemImmOpc = SystemZ::CHSI; break;
1106 case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break;
1107 case SystemZ::CLFIMux:
1108 case SystemZ::CLFI:
1109 if (isUInt<16>(MI.getOperand(1).getImm()))
1110 MemImmOpc = SystemZ::CLFHSI;
1111 break;
1112 case SystemZ::CLGFI:
1113 if (isUInt<16>(MI.getOperand(1).getImm()))
1114 MemImmOpc = SystemZ::CLGHSI;
1115 break;
1116 default: break;
1117 }
1118 if (MemImmOpc)
1119 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1120 get(MemImmOpc))
1121 .addFrameIndex(FrameIndex)
1122 .addImm(0)
1123 .addImm(MI.getOperand(1).getImm());
1124
1125 if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1126 bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1127 bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1128 // If we're spilling the destination of an LDGR or LGDR, store the
1129 // source register instead.
1130 if (OpNum == 0) {
1131 unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1132 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1133 get(StoreOpcode))
1134 .add(MI.getOperand(1))
1135 .addFrameIndex(FrameIndex)
1136 .addImm(0)
1137 .addReg(0);
1138 }
1139 // If we're spilling the source of an LDGR or LGDR, load the
1140 // destination register instead.
1141 if (OpNum == 1) {
1142 unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1143 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1144 get(LoadOpcode))
1145 .add(MI.getOperand(0))
1146 .addFrameIndex(FrameIndex)
1147 .addImm(0)
1148 .addReg(0);
1149 }
1150 }
1151
1152 // Look for cases where the source of a simple store or the destination
1153 // of a simple load is being spilled. Try to use MVC instead.
1154 //
1155 // Although MVC is in practice a fast choice in these cases, it is still
1156 // logically a bytewise copy. This means that we cannot use it if the
1157 // load or store is volatile. We also wouldn't be able to use MVC if
1158 // the two memories partially overlap, but that case cannot occur here,
1159 // because we know that one of the memories is a full frame index.
1160 //
1161 // For performance reasons, we also want to avoid using MVC if the addresses
1162 // might be equal. We don't worry about that case here, because spill slot
1163 // coloring happens later, and because we have special code to remove
1164 // MVCs that turn out to be redundant.
1165 if (OpNum == 0 && MI.hasOneMemOperand()) {
1166 MachineMemOperand *MMO = *MI.memoperands_begin();
1167 if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1168 // Handle conversion of loads.
1169 if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
1170 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1171 get(SystemZ::MVC))
1172 .addFrameIndex(FrameIndex)
1173 .addImm(0)
1174 .addImm(Size)
1175 .add(MI.getOperand(1))
1176 .addImm(MI.getOperand(2).getImm())
1177 .addMemOperand(MMO);
1178 }
1179 // Handle conversion of stores.
1180 if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
1181 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1182 get(SystemZ::MVC))
1183 .add(MI.getOperand(1))
1184 .addImm(MI.getOperand(2).getImm())
1185 .addImm(Size)
1186 .addFrameIndex(FrameIndex)
1187 .addImm(0)
1188 .addMemOperand(MMO);
1189 }
1190 }
1191 }
1192
1193 // If the spilled operand is the final one or the instruction is
1194 // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of
1195 // CC if it is live and MI does not define it.
1196 unsigned NumOps = MI.getNumExplicitOperands();
1197 int MemOpcode = SystemZ::getMemOpcode(Opcode);
1198 if (MemOpcode == -1 ||
1199 (CCLiveAtMI && !MI.definesRegister(SystemZ::CC) &&
1200 get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
1201 return nullptr;
1202
1203 // Check if all other vregs have a usable allocation in the case of vector
1204 // to FP conversion.
1205 const MCInstrDesc &MCID = MI.getDesc();
1206 for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
1207 const MCOperandInfo &MCOI = MCID.operands()[I];
1208 if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
1209 continue;
1210 const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
1211 if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
1212 Register Reg = MI.getOperand(I).getReg();
1213 Register PhysReg = Reg.isVirtual()
1214 ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
1215 : Reg;
1216 if (!PhysReg ||
1217 !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
1218 SystemZ::FP64BitRegClass.contains(PhysReg) ||
1219 SystemZ::VF128BitRegClass.contains(PhysReg)))
1220 return nullptr;
1221 }
1222 }
1223 // Fused multiply and add/sub need to have the same dst and accumulator reg.
1224 bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
1225 Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
1226 if (FusedFPOp) {
1227 Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
1228 Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
1229 if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
1230 return nullptr;
1231 }
1232
1233 // Try to swap compare operands if possible.
1234 bool NeedsCommute = false;
1235 if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
1236 MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
1237 MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
1238 MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
1239 OpNum == 0 && prepareCompareSwapOperands(MI))
1240 NeedsCommute = true;
1241
1242 bool CCOperands = false;
1243 if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
1244 MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
1245 assert(MI.getNumOperands() == 6 && NumOps == 5 &&
1246 "LOCR/SELR instruction operands corrupt?");
1247 NumOps -= 2;
1248 CCOperands = true;
1249 }
1250
1251 // See if this is a 3-address instruction that is convertible to 2-address
1252 // and suitable for folding below. Only try this with virtual registers
1253 // and a provided VRM (during regalloc).
1254 if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
1255 if (VRM == nullptr)
1256 return nullptr;
1257 else {
1258 Register DstReg = MI.getOperand(0).getReg();
1259 Register DstPhys =
1260 (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
1261 Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1262 : ((OpNum == 1 && MI.isCommutable())
1263 ? MI.getOperand(2).getReg()
1264 : Register()));
1265 if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1266 SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
1267 NeedsCommute = (OpNum == 1);
1268 else
1269 return nullptr;
1270 }
1271 }
1272
1273 if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
1274 const MCInstrDesc &MemDesc = get(MemOpcode);
1275 uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1276 assert(AccessBytes != 0 && "Size of access should be known");
1277 assert(AccessBytes <= Size && "Access outside the frame index");
1278 uint64_t Offset = Size - AccessBytes;
1279 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1280 MI.getDebugLoc(), get(MemOpcode));
1281 if (MI.isCompare()) {
1282 assert(NumOps == 2 && "Expected 2 register operands for a compare.");
1283 MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
1284 }
1285 else if (FusedFPOp) {
1286 MIB.add(MI.getOperand(0));
1287 MIB.add(MI.getOperand(3));
1288 MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
1289 }
1290 else {
1291 MIB.add(MI.getOperand(0));
1292 if (NeedsCommute)
1293 MIB.add(MI.getOperand(2));
1294 else
1295 for (unsigned I = 1; I < OpNum; ++I)
1296 MIB.add(MI.getOperand(I));
1297 }
1298 MIB.addFrameIndex(FrameIndex).addImm(Offset);
1299 if (MemDesc.TSFlags & SystemZII::HasIndex)
1300 MIB.addReg(0);
1301 if (CCOperands) {
1302 unsigned CCValid = MI.getOperand(NumOps).getImm();
1303 unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
1304 MIB.addImm(CCValid);
1305 MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
1306 }
1307 if (MIB->definesRegister(SystemZ::CC) &&
1308 (!MI.definesRegister(SystemZ::CC) ||
1309 MI.registerDefIsDead(SystemZ::CC))) {
1310 MIB->addRegisterDead(SystemZ::CC, TRI);
1311 if (CCLiveRange)
1312 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1313 }
1314 // Constrain the register classes if converted from a vector opcode. The
1315 // allocated regs are in an FP reg-class per previous check above.
1316 for (const MachineOperand &MO : MIB->operands())
1317 if (MO.isReg() && MO.getReg().isVirtual()) {
1318 Register Reg = MO.getReg();
1319 if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
1320 MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
1321 else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
1322 MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
1323 else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
1324 MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
1325 }
1326
1327 transferDeadCC(&MI, MIB);
1328 transferMIFlag(&MI, MIB, MachineInstr::NoSWrap);
1329 transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);
1330 return MIB;
1331 }
1332
1333 return nullptr;
1334}
1335
1336MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1337 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1338 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1339 LiveIntervals *LIS) const {
1340 return nullptr;
1341}
1342
1343bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1344 switch (MI.getOpcode()) {
1345 case SystemZ::L128:
1346 splitMove(MI, SystemZ::LG);
1347 return true;
1348
1349 case SystemZ::ST128:
1350 splitMove(MI, SystemZ::STG);
1351 return true;
1352
1353 case SystemZ::LX:
1354 splitMove(MI, SystemZ::LD);
1355 return true;
1356
1357 case SystemZ::STX:
1358 splitMove(MI, SystemZ::STD);
1359 return true;
1360
1361 case SystemZ::LBMux:
1362 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1363 return true;
1364
1365 case SystemZ::LHMux:
1366 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1367 return true;
1368
1369 case SystemZ::LLCRMux:
1370 expandZExtPseudo(MI, SystemZ::LLCR, 8);
1371 return true;
1372
1373 case SystemZ::LLHRMux:
1374 expandZExtPseudo(MI, SystemZ::LLHR, 16);
1375 return true;
1376
1377 case SystemZ::LLCMux:
1378 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1379 return true;
1380
1381 case SystemZ::LLHMux:
1382 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1383 return true;
1384
1385 case SystemZ::LMux:
1386 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1387 return true;
1388
1389 case SystemZ::LOCMux:
1390 expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1391 return true;
1392
1393 case SystemZ::LOCHIMux:
1394 expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1395 return true;
1396
1397 case SystemZ::STCMux:
1398 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1399 return true;
1400
1401 case SystemZ::STHMux:
1402 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1403 return true;
1404
1405 case SystemZ::STMux:
1406 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1407 return true;
1408
1409 case SystemZ::STOCMux:
1410 expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1411 return true;
1412
1413 case SystemZ::LHIMux:
1414 expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1415 return true;
1416
1417 case SystemZ::IIFMux:
1418 expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1419 return true;
1420
1421 case SystemZ::IILMux:
1422 expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1423 return true;
1424
1425 case SystemZ::IIHMux:
1426 expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1427 return true;
1428
1429 case SystemZ::NIFMux:
1430 expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1431 return true;
1432
1433 case SystemZ::NILMux:
1434 expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1435 return true;
1436
1437 case SystemZ::NIHMux:
1438 expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1439 return true;
1440
1441 case SystemZ::OIFMux:
1442 expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1443 return true;
1444
1445 case SystemZ::OILMux:
1446 expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1447 return true;
1448
1449 case SystemZ::OIHMux:
1450 expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1451 return true;
1452
1453 case SystemZ::XIFMux:
1454 expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1455 return true;
1456
1457 case SystemZ::TMLMux:
1458 expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1459 return true;
1460
1461 case SystemZ::TMHMux:
1462 expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1463 return true;
1464
1465 case SystemZ::AHIMux:
1466 expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1467 return true;
1468
1469 case SystemZ::AHIMuxK:
1470 expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1471 return true;
1472
1473 case SystemZ::AFIMux:
1474 expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1475 return true;
1476
1477 case SystemZ::CHIMux:
1478 expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1479 return true;
1480
1481 case SystemZ::CFIMux:
1482 expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1483 return true;
1484
1485 case SystemZ::CLFIMux:
1486 expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1487 return true;
1488
1489 case SystemZ::CMux:
1490 expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1491 return true;
1492
1493 case SystemZ::CLMux:
1494 expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1495 return true;
1496
1497 case SystemZ::RISBMux: {
1498 bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
1499 bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
1500 if (SrcIsHigh == DestIsHigh)
1501 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1502 else {
1503 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1504 MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1505 }
1506 return true;
1507 }
1508
1509 case SystemZ::ADJDYNALLOC:
1510 splitAdjDynAlloc(MI);
1511 return true;
1512
1513 case TargetOpcode::LOAD_STACK_GUARD:
1514 expandLoadStackGuard(&MI);
1515 return true;
1516
1517 default:
1518 return false;
1519 }
1520}
1521
1522unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1523 if (MI.isInlineAsm()) {
1524 const MachineFunction *MF = MI.getParent()->getParent();
1525 const char *AsmStr = MI.getOperand(0).getSymbolName();
1526 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1527 }
1528 else if (MI.getOpcode() == SystemZ::PATCHPOINT)
1529 return PatchPointOpers(&MI).getNumPatchBytes();
1530 else if (MI.getOpcode() == SystemZ::STACKMAP)
1531 return MI.getOperand(1).getImm();
1532 else if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1533 return 6;
1534
1535 return MI.getDesc().getSize();
1536}
1537
1538SystemZII::Branch
1539SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1540 switch (MI.getOpcode()) {
1541 case SystemZ::BR:
1542 case SystemZ::BI:
1543 case SystemZ::J:
1544 case SystemZ::JG:
1545 return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1546 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1547
1548 case SystemZ::BRC:
1549 case SystemZ::BRCL:
1550 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1551 MI.getOperand(1).getImm(), &MI.getOperand(2));
1552
1553 case SystemZ::BRCT:
1554 case SystemZ::BRCTH:
1555 return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1556 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1557
1558 case SystemZ::BRCTG:
1559 return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1560 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1561
1562 case SystemZ::CIJ:
1563 case SystemZ::CRJ:
1564 return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1565 MI.getOperand(2).getImm(), &MI.getOperand(3));
1566
1567 case SystemZ::CLIJ:
1568 case SystemZ::CLRJ:
1569 return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1570 MI.getOperand(2).getImm(), &MI.getOperand(3));
1571
1572 case SystemZ::CGIJ:
1573 case SystemZ::CGRJ:
1574 return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1575 MI.getOperand(2).getImm(), &MI.getOperand(3));
1576
1577 case SystemZ::CLGIJ:
1578 case SystemZ::CLGRJ:
1579 return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1580 MI.getOperand(2).getImm(), &MI.getOperand(3));
1581
1582 case SystemZ::INLINEASM_BR:
1583 // Don't try to analyze asm goto, so pass nullptr as branch target argument.
1584 return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1585
1586 default:
1587 llvm_unreachable("Unrecognized branch opcode");
1588 }
1589}
1590
1591void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1592 unsigned &LoadOpcode,
1593 unsigned &StoreOpcode) const {
1594 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1595 LoadOpcode = SystemZ::L;
1596 StoreOpcode = SystemZ::ST;
1597 } else if (RC == &SystemZ::GRH32BitRegClass) {
1598 LoadOpcode = SystemZ::LFH;
1599 StoreOpcode = SystemZ::STFH;
1600 } else if (RC == &SystemZ::GRX32BitRegClass) {
1601 LoadOpcode = SystemZ::LMux;
1602 StoreOpcode = SystemZ::STMux;
1603 } else if (RC == &SystemZ::GR64BitRegClass ||
1604 RC == &SystemZ::ADDR64BitRegClass) {
1605 LoadOpcode = SystemZ::LG;
1606 StoreOpcode = SystemZ::STG;
1607 } else if (RC == &SystemZ::GR128BitRegClass ||
1608 RC == &SystemZ::ADDR128BitRegClass) {
1609 LoadOpcode = SystemZ::L128;
1610 StoreOpcode = SystemZ::ST128;
1611 } else if (RC == &SystemZ::FP32BitRegClass) {
1612 LoadOpcode = SystemZ::LE;
1613 StoreOpcode = SystemZ::STE;
1614 } else if (RC == &SystemZ::FP64BitRegClass) {
1615 LoadOpcode = SystemZ::LD;
1616 StoreOpcode = SystemZ::STD;
1617 } else if (RC == &SystemZ::FP128BitRegClass) {
1618 LoadOpcode = SystemZ::LX;
1619 StoreOpcode = SystemZ::STX;
1620 } else if (RC == &SystemZ::VR32BitRegClass) {
1621 LoadOpcode = SystemZ::VL32;
1622 StoreOpcode = SystemZ::VST32;
1623 } else if (RC == &SystemZ::VR64BitRegClass) {
1624 LoadOpcode = SystemZ::VL64;
1625 StoreOpcode = SystemZ::VST64;
1626 } else if (RC == &SystemZ::VF128BitRegClass ||
1627 RC == &SystemZ::VR128BitRegClass) {
1628 LoadOpcode = SystemZ::VL;
1629 StoreOpcode = SystemZ::VST;
1630 } else
1631 llvm_unreachable("Unsupported regclass to load or store");
1632}
1633
1634unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1635 int64_t Offset,
1636 const MachineInstr *MI) const {
1637 const MCInstrDesc &MCID = get(Opcode);
1638 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1639 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1640 // Get the instruction to use for unsigned 12-bit displacements.
1641 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1642 if (Disp12Opcode >= 0)
1643 return Disp12Opcode;
1644
1645 // All address-related instructions can use unsigned 12-bit
1646 // displacements.
1647 return Opcode;
1648 }
1649 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1650 // Get the instruction to use for signed 20-bit displacements.
1651 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1652 if (Disp20Opcode >= 0)
1653 return Disp20Opcode;
1654
1655 // Check whether Opcode allows signed 20-bit displacements.
1656 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1657 return Opcode;
1658
1659 // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1660 if (MI && MI->getOperand(0).isReg()) {
1661 Register Reg = MI->getOperand(0).getReg();
1662 if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1663 switch (Opcode) {
1664 case SystemZ::VL32:
1665 return SystemZ::LEY;
1666 case SystemZ::VST32:
1667 return SystemZ::STEY;
1668 case SystemZ::VL64:
1669 return SystemZ::LDY;
1670 case SystemZ::VST64:
1671 return SystemZ::STDY;
1672 default: break;
1673 }
1674 }
1675 }
1676 }
1677 return 0;
1678}
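// For example, SystemZ::L (12-bit unsigned displacement) is returned
// unchanged for Offset == 0, is rewritten to its 20-bit companion for an
// out-of-range offset such as 5000 (via SystemZ::getDisp20Opcode, giving LY),
// and yields 0 if even a signed 20-bit displacement cannot encode the offset.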
1679
1680bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const {
1681 const MCInstrDesc &MCID = get(Opcode);
1682 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1683 return SystemZ::getDisp12Opcode(Opcode) >= 0;
1684 return SystemZ::getDisp20Opcode(Opcode) >= 0;
1685}
1686
1687unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1688 switch (Opcode) {
1689 case SystemZ::L: return SystemZ::LT;
1690 case SystemZ::LY: return SystemZ::LT;
1691 case SystemZ::LG: return SystemZ::LTG;
1692 case SystemZ::LGF: return SystemZ::LTGF;
1693 case SystemZ::LR: return SystemZ::LTR;
1694 case SystemZ::LGFR: return SystemZ::LTGFR;
1695 case SystemZ::LGR: return SystemZ::LTGR;
1696 case SystemZ::LER: return SystemZ::LTEBR;
1697 case SystemZ::LDR: return SystemZ::LTDBR;
1698 case SystemZ::LXR: return SystemZ::LTXBR;
1699 case SystemZ::LCDFR: return SystemZ::LCDBR;
1700 case SystemZ::LPDFR: return SystemZ::LPDBR;
1701 case SystemZ::LNDFR: return SystemZ::LNDBR;
1702 case SystemZ::LCDFR_32: return SystemZ::LCEBR;
1703 case SystemZ::LPDFR_32: return SystemZ::LPEBR;
1704 case SystemZ::LNDFR_32: return SystemZ::LNEBR;
1705 // On zEC12 we prefer to use RISBGN. But if there is a chance to
1706 // actually use the condition code, we may turn it back into RISBG.
1707 // Note that RISBG is not really a "load-and-test" instruction,
1708 // but sets the same condition code values, so is OK to use here.
1709 case SystemZ::RISBGN: return SystemZ::RISBG;
1710 default: return 0;
1711 }
1712}
1713
1714bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1715 unsigned &Start, unsigned &End) const {
1716 // Reject trivial all-zero masks.
1717 Mask &= allOnes(BitSize);
1718 if (Mask == 0)
1719 return false;
1720
1721 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1722 // the msb and End specifies the index of the lsb.
1723 unsigned LSB, Length;
1724 if (isShiftedMask_64(Mask, LSB, Length)) {
1725 Start = 63 - (LSB + Length - 1);
1726 End = 63 - LSB;
1727 return true;
1728 }
1729
1730 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1731 // of the low 1s and End specifies the lsb of the high 1s.
1732 if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
1733 assert(LSB > 0 && "Bottom bit must be set");
1734 assert(LSB + Length < BitSize && "Top bit must be set");
1735 Start = 63 - (LSB - 1);
1736 End = 63 - (LSB + Length);
1737 return true;
1738 }
1739
1740 return false;
1741}
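// Worked example (a sketch, not part of the upstream source): with
// Mask = 0x00FF0000 and BitSize = 64, isShiftedMask_64 reports LSB = 16 and
// Length = 8, so Start = 63 - (16 + 8 - 1) = 40 and End = 63 - 16 = 47,
// matching the RISBG/RxSBG bit numbering in which bit 0 is the most
// significant bit of the 64-bit register.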
1742
1743unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
1744 SystemZII::FusedCompareType Type,
1745 const MachineInstr *MI) const {
1746 switch (Opcode) {
1747 case SystemZ::CHI:
1748 case SystemZ::CGHI:
1749 if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
1750 return 0;
1751 break;
1752 case SystemZ::CLFI:
1753 case SystemZ::CLGFI:
1754 if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
1755 return 0;
1756 break;
1757 case SystemZ::CL:
1758 case SystemZ::CLG:
1759 if (!STI.hasMiscellaneousExtensions())
1760 return 0;
1761 if (!(MI && MI->getOperand(3).getReg() == 0))
1762 return 0;
1763 break;
1764 }
1765 switch (Type) {
1766 case SystemZII::CompareAndBranch:
1767 switch (Opcode) {
1768 case SystemZ::CR:
1769 return SystemZ::CRJ;
1770 case SystemZ::CGR:
1771 return SystemZ::CGRJ;
1772 case SystemZ::CHI:
1773 return SystemZ::CIJ;
1774 case SystemZ::CGHI:
1775 return SystemZ::CGIJ;
1776 case SystemZ::CLR:
1777 return SystemZ::CLRJ;
1778 case SystemZ::CLGR:
1779 return SystemZ::CLGRJ;
1780 case SystemZ::CLFI:
1781 return SystemZ::CLIJ;
1782 case SystemZ::CLGFI:
1783 return SystemZ::CLGIJ;
1784 default:
1785 return 0;
1786 }
1787 case SystemZII::CompareAndReturn:
1788 switch (Opcode) {
1789 case SystemZ::CR:
1790 return SystemZ::CRBReturn;
1791 case SystemZ::CGR:
1792 return SystemZ::CGRBReturn;
1793 case SystemZ::CHI:
1794 return SystemZ::CIBReturn;
1795 case SystemZ::CGHI:
1796 return SystemZ::CGIBReturn;
1797 case SystemZ::CLR:
1798 return SystemZ::CLRBReturn;
1799 case SystemZ::CLGR:
1800 return SystemZ::CLGRBReturn;
1801 case SystemZ::CLFI:
1802 return SystemZ::CLIBReturn;
1803 case SystemZ::CLGFI:
1804 return SystemZ::CLGIBReturn;
1805 default:
1806 return 0;
1807 }
1808 case SystemZII::CompareAndSibcall:
1809 switch (Opcode) {
1810 case SystemZ::CR:
1811 return SystemZ::CRBCall;
1812 case SystemZ::CGR:
1813 return SystemZ::CGRBCall;
1814 case SystemZ::CHI:
1815 return SystemZ::CIBCall;
1816 case SystemZ::CGHI:
1817 return SystemZ::CGIBCall;
1818 case SystemZ::CLR:
1819 return SystemZ::CLRBCall;
1820 case SystemZ::CLGR:
1821 return SystemZ::CLGRBCall;
1822 case SystemZ::CLFI:
1823 return SystemZ::CLIBCall;
1824 case SystemZ::CLGFI:
1825 return SystemZ::CLGIBCall;
1826 default:
1827 return 0;
1828 }
1829 case SystemZII::CompareAndTrap:
1830 switch (Opcode) {
1831 case SystemZ::CR:
1832 return SystemZ::CRT;
1833 case SystemZ::CGR:
1834 return SystemZ::CGRT;
1835 case SystemZ::CHI:
1836 return SystemZ::CIT;
1837 case SystemZ::CGHI:
1838 return SystemZ::CGIT;
1839 case SystemZ::CLR:
1840 return SystemZ::CLRT;
1841 case SystemZ::CLGR:
1842 return SystemZ::CLGRT;
1843 case SystemZ::CLFI:
1844 return SystemZ::CLFIT;
1845 case SystemZ::CLGFI:
1846 return SystemZ::CLGIT;
1847 case SystemZ::CL:
1848 return SystemZ::CLT;
1849 case SystemZ::CLG:
1850 return SystemZ::CLGT;
1851 default:
1852 return 0;
1853 }
1854 }
1855 return 0;
1856}
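// Illustrative example (a sketch, not part of the upstream source): for a
// compare such as SystemZ::CHI whose immediate operand is 10 (a value that
// fits in a signed 8-bit field), getFusedCompare(CHI,
// SystemZII::CompareAndBranch, MI) is expected to return SystemZ::CIJ, while
// an immediate such as 1000 fails the isInt<8> check above and the function
// returns 0, so no compare-and-branch fusion takes place.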
1857
1858 bool SystemZInstrInfo::
1859 prepareCompareSwapOperands(MachineBasicBlock::iterator const MBBI) const {
1860 assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
1861 MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
1862 "Not a compare reg/reg.");
1863
1864 MachineBasicBlock *MBB = MBBI->getParent();
1865 bool CCLive = true;
1866 SmallVector<MachineInstr *, 4> CCUsers;
1867 for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
1868 if (MI.readsRegister(SystemZ::CC)) {
1869 unsigned Flags = MI.getDesc().TSFlags;
1870 if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
1871 CCUsers.push_back(&MI);
1872 else
1873 return false;
1874 }
1875 if (MI.definesRegister(SystemZ::CC)) {
1876 CCLive = false;
1877 break;
1878 }
1879 }
1880 if (CCLive) {
1881 LivePhysRegs LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo());
1882 LiveRegs.addLiveOuts(*MBB);
1883 if (LiveRegs.contains(SystemZ::CC))
1884 return false;
1885 }
1886
1887 // Update all CC users.
1888 for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
1889 unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
1890 unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
1891 0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
1892 MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
1893 unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
1894 CCMaskMO.setImm(NewCCMask);
1895 }
1896
1897 return true;
1898}
1899
1900unsigned SystemZ::reverseCCMask(unsigned CCMask) {
1901 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1902 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1903 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
1904 (CCMask & SystemZ::CCMASK_CMP_UO));
1905}
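// Illustrative example (a sketch, not part of the upstream source):
// reverseCCMask(SystemZ::CCMASK_CMP_LT) yields SystemZ::CCMASK_CMP_GT and
// vice versa, while the EQ and UO bits pass through unchanged; this is the
// mask rewrite that prepareCompareSwapOperands applies to every CC user when
// the two register operands of a compare are swapped.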
1906
1907 MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) {
1908 MachineFunction &MF = *MBB->getParent();
1909 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
1910 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
1911 return NewMBB;
1912}
1913
1914 MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI,
1915 MachineBasicBlock *MBB) {
1916 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1917 NewMBB->splice(NewMBB->begin(), MBB,
1918 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
1919 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1920 return NewMBB;
1921}
1922
1923 MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI,
1924 MachineBasicBlock *MBB) {
1925 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1926 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
1927 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1928 return NewMBB;
1929}
1930
1931unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
1932 if (!STI.hasLoadAndTrap())
1933 return 0;
1934 switch (Opcode) {
1935 case SystemZ::L:
1936 case SystemZ::LY:
1937 return SystemZ::LAT;
1938 case SystemZ::LG:
1939 return SystemZ::LGAT;
1940 case SystemZ::LFH:
1941 return SystemZ::LFHAT;
1942 case SystemZ::LLGF:
1943 return SystemZ::LLGFAT;
1944 case SystemZ::LLGT:
1945 return SystemZ::LLGTAT;
1946 }
1947 return 0;
1948}
1949
1950 void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
1951 MachineBasicBlock::iterator MBBI,
1952 unsigned Reg, uint64_t Value) const {
1953 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1954 unsigned Opcode = 0;
1955 if (isInt<16>(Value))
1956 Opcode = SystemZ::LGHI;
1957 else if (SystemZ::isImmLL(Value))
1958 Opcode = SystemZ::LLILL;
1959 else if (SystemZ::isImmLH(Value)) {
1960 Opcode = SystemZ::LLILH;
1961 Value >>= 16;
1962 }
1963 else if (isInt<32>(Value))
1964 Opcode = SystemZ::LGFI;
1965 if (Opcode) {
1966 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
1967 return;
1968 }
1969
1970 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1971 assert (MRI.isSSA() && "Huge values only handled before reg-alloc.");
1972 Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
1973 Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
1974 BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
1975 BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
1976 .addReg(Reg0).addImm(Value >> 32);
1977 BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
1978 .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
1979}
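// Illustrative example (a sketch, not part of the upstream source):
// Value = 0x123456789 fits neither in a signed 16-bit field, nor as a
// shifted 16-bit immediate (LLILL/LLILH), nor in a signed 32-bit field, so it
// takes the huge-value path above: IIHF64 inserts the high half 0x1 and
// IILF64 inserts the low half 0x23456789 into the destination register.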
1980
1981 bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI,
1982 StringRef &ErrInfo) const {
1983 const MCInstrDesc &MCID = MI.getDesc();
1984 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
1985 if (I >= MCID.getNumOperands())
1986 break;
1987 const MachineOperand &Op = MI.getOperand(I);
1988 const MCOperandInfo &MCOI = MCID.operands()[I];
1989 // Addressing modes have register and immediate operands. Op should be a
1990 // register (or frame index) operand if MCOI.RegClass contains a valid
1991 // register class, or an immediate otherwise.
1992 if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
1993 ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
1994 (MCOI.RegClass == -1 && !Op.isImm()))) {
1995 ErrInfo = "Addressing mode operands corrupt!";
1996 return false;
1997 }
1998 }
1999
2000 return true;
2001}
2002
2003 bool SystemZInstrInfo::
2004 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2005 const MachineInstr &MIb) const {
2006
2007 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2008 return false;
2009
2010 // If mem-operands show that the same address Value is used by both
2011 // instructions, check for non-overlapping offsets and widths. Not
2012 // sure if a register based analysis would be an improvement...
2013
2014 MachineMemOperand *MMOa = *MIa.memoperands_begin();
2015 MachineMemOperand *MMOb = *MIb.memoperands_begin();
2016 const Value *VALa = MMOa->getValue();
2017 const Value *VALb = MMOb->getValue();
2018 bool SameVal = (VALa && VALb && (VALa == VALb));
2019 if (!SameVal) {
2020 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2021 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2022 if (PSVa && PSVb && (PSVa == PSVb))
2023 SameVal = true;
2024 }
2025 if (SameVal) {
2026 int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2027 int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2028 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2029 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2030 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2031 if (LowOffset + LowWidth <= HighOffset)
2032 return true;
2033 }
2034
2035 return false;
2036}
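// Illustrative example (a sketch, not part of the upstream source): two
// accesses whose MachineMemOperands share the same IR Value, one at offset 0
// with width 8 and the other at offset 8 with width 8, satisfy
// LowOffset + LowWidth <= HighOffset (0 + 8 <= 8) and are reported as
// trivially disjoint; any overlap falls through to the final return false.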