//===- AArch64MIPeepholeOpt.cpp - AArch64 MI peephole optimization pass ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs the following peephole optimizations at the MIR level.
//
// 1. MOVi32imm + ANDWrr ==> ANDWri + ANDWri
//    MOVi64imm + ANDXrr ==> ANDXri + ANDXri
//
// 2. MOVi32imm + ADDWrr ==> ADDWri + ADDWri
//    MOVi64imm + ADDXrr ==> ADDXri + ADDXri
//
// 3. MOVi32imm + SUBWrr ==> SUBWri + SUBWri
//    MOVi64imm + SUBXrr ==> SUBXri + SUBXri
//
// The mov pseudo instruction could be expanded to multiple mov instructions
// later. In this case, we can try to split the constant operand of the mov
// instruction into two immediates which can be directly encoded into
// *Wri/*Xri instructions. That gives two AND/ADD/SUB instructions instead of
// multiple `mov` + `and/add/sub` instructions.
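//
// For example, the 32-bit constant 0x123456 needs a MOVZ plus a MOVK to
// materialize, but for an ADD it can be split as (0x123 << 12) + 0x456,
// giving `add Wd, Wn, #0x123, lsl #12` followed by `add Wd, Wd, #0x456`.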
//
// 4. Remove redundant ORRWrs which is generated by zero-extend.
//
//    %3:gpr32 = ORRWrs $wzr, %2, 0
//    %4:gpr64 = SUBREG_TO_REG 0, %3, %subreg.sub_32
//
// If AArch64's 32-bit form of an instruction defines the source operand of
// ORRWrs, we can remove the ORRWrs because the upper 32 bits of the source
// operand are set to zero.
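//
// For example, if %2 above is defined by a 32-bit instruction such as ADDWrr,
// its upper 32 bits are already zero, so SUBREG_TO_REG can use %2 directly.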
//
// 5. %reg = INSERT_SUBREG %reg(tied-def 0), %subreg, subidx
//    ==> %reg:subidx = SUBREG_TO_REG 0, %subreg, subidx
//
// 6. %intermediate:gpr32 = COPY %src:fpr128
//    %dst:fpr128 = INSvi32gpr %dst_vec:fpr128, dst_index, %intermediate:gpr32
//    ==> %dst:fpr128 = INSvi32lane %dst_vec:fpr128, dst_index, %src:fpr128, 0
//
// In cases where a source FPR is copied to a GPR in order to be copied
// to a destination FPR, we can directly copy the values between the FPRs,
// eliminating the use of the integer unit. When we match a pattern of
// INSvi[X]gpr that is preceded by a chain of COPY instructions from an FPR
// source, we use INSvi[X]lane to replace the COPY & INSvi[X]gpr
// instructions.
//
// 7. If MI sets zero for high 64-bits implicitly, remove `mov 0` for high
//    64-bits. For example,
//
//   %1:fpr64 = nofpexcept FCVTNv4i16 %0:fpr128, implicit $fpcr
//   %2:fpr64 = MOVID 0
//   %4:fpr128 = IMPLICIT_DEF
//   %3:fpr128 = INSERT_SUBREG %4:fpr128(tied-def 0), %2:fpr64, %subreg.dsub
//   %6:fpr128 = IMPLICIT_DEF
//   %5:fpr128 = INSERT_SUBREG %6:fpr128(tied-def 0), %1:fpr64, %subreg.dsub
//   %7:fpr128 = INSvi64lane %5:fpr128(tied-def 0), 1, %3:fpr128, 0
//   ==>
//   %1:fpr64 = nofpexcept FCVTNv4i16 %0:fpr128, implicit $fpcr
//   %6:fpr128 = IMPLICIT_DEF
//   %7:fpr128 = INSERT_SUBREG %6:fpr128(tied-def 0), %1:fpr64, %subreg.dsub
//
// 8. Remove redundant CSELs that select between identical registers, by
//    replacing them with unconditional moves.
//
//===----------------------------------------------------------------------===//

#include "AArch64ExpandImm.h"
#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineLoopInfo.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-mi-peephole-opt"

namespace {

struct AArch64MIPeepholeOpt : public MachineFunctionPass {
  static char ID;

  AArch64MIPeepholeOpt() : MachineFunctionPass(ID) {
    initializeAArch64MIPeepholeOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const AArch64RegisterInfo *TRI;
  MachineLoopInfo *MLI;
  MachineRegisterInfo *MRI;

  using OpcodePair = std::pair<unsigned, unsigned>;
  template <typename T>
  using SplitAndOpcFunc =
      std::function<std::optional<OpcodePair>(T, unsigned, T &, T &)>;
  using BuildMIFunc =
      std::function<void(MachineInstr &, OpcodePair, unsigned, unsigned,
                         Register, Register, Register)>;

  /// For instructions where an immediate operand could be split into two
  /// separate immediate instructions, use splitTwoPartImm to handle the
  /// optimization.
  ///
  /// To implement, the following function types must be passed to
  /// splitTwoPartImm. A SplitAndOpcFunc must be implemented that determines if
  /// splitting the immediate is valid and returns the associated new opcode. A
  /// BuildMIFunc must be implemented to build the two immediate instructions.
  ///
  /// Example Pattern (where IMM would require 2+ MOV instructions):
  ///   %dst = <Instr>rr %src IMM [...]
  /// becomes:
  ///   %tmp = <Instr>ri %src (encode half IMM) [...]
  ///   %dst = <Instr>ri %tmp (encode half IMM) [...]
  template <typename T>
  bool splitTwoPartImm(MachineInstr &MI,
                       SplitAndOpcFunc<T> SplitAndOpc, BuildMIFunc BuildInstr);

  bool checkMovImmInstr(MachineInstr &MI, MachineInstr *&MovMI,
                        MachineInstr *&SubregToRegMI);

  template <typename T>
  bool visitADDSUB(unsigned PosOpc, unsigned NegOpc, MachineInstr &MI);
  template <typename T>
  bool visitADDSSUBS(OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI);

  template <typename T>
  bool visitAND(unsigned Opc, MachineInstr &MI);
  bool visitORR(MachineInstr &MI);
  bool visitCSEL(MachineInstr &MI);
  bool visitINSERT(MachineInstr &MI);
  bool visitINSviGPR(MachineInstr &MI, unsigned Opc);
  bool visitINSvi64lane(MachineInstr &MI);
  bool visitFMOVDr(MachineInstr &MI);
  bool visitCopy(MachineInstr &MI);
  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "AArch64 MI Peephole Optimization pass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

char AArch64MIPeepholeOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64MIPeepholeOpt, "aarch64-mi-peephole-opt",
                "AArch64 MI Peephole Optimization", false, false)

template <typename T>
static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
  T UImm = static_cast<T>(Imm);
  if (AArch64_AM::isLogicalImmediate(UImm, RegSize))
    return false;

  // If this immediate can be handled by one instruction, do not split it.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(UImm, RegSize, Insn);
  if (Insn.size() == 1)
    return false;

  // A bitmask immediate consists of consecutive ones. Let's say there is
  // constant 0b00000000001000000000010000000000 which does not consist of
  // consecutive ones. We can split it into two bitmask immediates like
  // 0b00000000001111111111110000000000 and 0b11111111111000000000011111111111.
  // If we AND with these two bitmask immediates, we get the original constant.
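  //
  // Concretely, for the constant above (0x00200400): LowestBitSet = 10 and
  // HighestBitSet = 21, so NewImm1 = 0x003ffc00 and
  // NewImm2 = 0x00200400 | ~0x003ffc00 = 0xffe007ff, and
  // 0x003ffc00 & 0xffe007ff == 0x00200400.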
  unsigned LowestBitSet = llvm::countr_zero(UImm);
  unsigned HighestBitSet = Log2_64(UImm);

  // Create a mask which is filled with one from the position of lowest bit set
  // to the position of highest bit set.
  T NewImm1 = (static_cast<T>(2) << HighestBitSet) -
              (static_cast<T>(1) << LowestBitSet);
  // Create a mask which is filled with one outside the position of lowest bit
  // set and the position of highest bit set.
  T NewImm2 = UImm | ~NewImm1;

  // If the split value is not a valid bitmask immediate, do not split this
  // constant.
  if (!AArch64_AM::isLogicalImmediate(NewImm2, RegSize))
    return false;

  Imm1Enc = AArch64_AM::encodeLogicalImmediate(NewImm1, RegSize);
  Imm2Enc = AArch64_AM::encodeLogicalImmediate(NewImm2, RegSize);
  return true;
}

template <typename T>
bool AArch64MIPeepholeOpt::visitAND(
    unsigned Opc, MachineInstr &MI) {
  // Try below transformation.
  //
  // MOVi32imm + ANDWrr ==> ANDWri + ANDWri
  // MOVi64imm + ANDXrr ==> ANDXri + ANDXri
  //
  // The mov pseudo instruction could be expanded to multiple mov instructions
  // later. Let's try to split the constant operand of the mov instruction into
  // two bitmask immediates. It makes only two AND instructions instead of
  // multiple mov + and instructions.

  return splitTwoPartImm<T>(
      MI,
      [Opc](T Imm, unsigned RegSize, T &Imm0,
            T &Imm1) -> std::optional<OpcodePair> {
        if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
          return std::make_pair(Opc, Opc);
        return std::nullopt;
      },
      [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0,
                   unsigned Imm1, Register SrcReg, Register NewTmpReg,
                   Register NewDstReg) {
        DebugLoc DL = MI.getDebugLoc();
        MachineBasicBlock *MBB = MI.getParent();
        BuildMI(*MBB, MI, DL, TII->get(Opcode.first), NewTmpReg)
            .addReg(SrcReg)
            .addImm(Imm0);
        BuildMI(*MBB, MI, DL, TII->get(Opcode.second), NewDstReg)
            .addReg(NewTmpReg)
            .addImm(Imm1);
      });
}

bool AArch64MIPeepholeOpt::visitORR(MachineInstr &MI) {
  // Check this ORR comes from below zero-extend pattern.
  //
  //   def : Pat<(i64 (zext GPR32:$src)),
  //             (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
  if (MI.getOperand(3).getImm() != 0)
    return false;

  if (MI.getOperand(1).getReg() != AArch64::WZR)
    return false;

  MachineInstr *SrcMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
  if (!SrcMI)
    return false;

  // From https://developer.arm.com/documentation/dui0801/b/BABBGCAC
  //
  // When you use the 32-bit form of an instruction, the upper 32 bits of the
  // source registers are ignored and the upper 32 bits of the destination
  // register are set to zero.
  //
  // If AArch64's 32-bit form of an instruction defines the source operand of
  // the zero-extend, we do not need the zero-extend. Check whether the opcode
  // is a real AArch64 instruction; if it is not, conservatively do not
  // process it.
  if (SrcMI->getOpcode() == TargetOpcode::COPY &&
      SrcMI->getOperand(1).getReg().isVirtual()) {
    const TargetRegisterClass *RC =
        MRI->getRegClass(SrcMI->getOperand(1).getReg());

    // A COPY from an FPR will become a FMOVSWr, so do so now so that we know
    // that the upper bits are zero.
    if (RC != &AArch64::FPR32RegClass &&
        ((RC != &AArch64::FPR64RegClass && RC != &AArch64::FPR128RegClass) ||
         SrcMI->getOperand(1).getSubReg() != AArch64::ssub))
      return false;
    Register CpySrc = SrcMI->getOperand(1).getReg();
    if (SrcMI->getOperand(1).getSubReg() == AArch64::ssub) {
      CpySrc = MRI->createVirtualRegister(&AArch64::FPR32RegClass);
      BuildMI(*SrcMI->getParent(), SrcMI, SrcMI->getDebugLoc(),
              TII->get(TargetOpcode::COPY), CpySrc)
          .add(SrcMI->getOperand(1));
    }
    BuildMI(*SrcMI->getParent(), SrcMI, SrcMI->getDebugLoc(),
            TII->get(AArch64::FMOVSWr), SrcMI->getOperand(0).getReg())
        .addReg(CpySrc);
    SrcMI->eraseFromParent();
  } else if (SrcMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
    return false;

  Register DefReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  MRI->replaceRegWith(DefReg, SrcReg);
  MRI->clearKillFlags(SrcReg);
  LLVM_DEBUG(dbgs() << "Removed: " << MI << "\n");
  MI.eraseFromParent();

  return true;
}

bool AArch64MIPeepholeOpt::visitCSEL(MachineInstr &MI) {
  // Replace CSEL with MOV when both inputs are the same register.
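  //
  // For example:
  //   %dst:gpr32 = CSELWr %src:gpr32, %src:gpr32, <cc>, implicit $nzcv
  // becomes:
  //   %dst:gpr32 = ORRWrs $wzr, %src:gpr32, 0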
  if (MI.getOperand(1).getReg() != MI.getOperand(2).getReg())
    return false;

  auto ZeroReg =
      MI.getOpcode() == AArch64::CSELXr ? AArch64::XZR : AArch64::WZR;
  auto OrOpcode =
      MI.getOpcode() == AArch64::CSELXr ? AArch64::ORRXrs : AArch64::ORRWrs;

  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(OrOpcode))
      .addReg(MI.getOperand(0).getReg(), RegState::Define)
      .addReg(ZeroReg)
      .addReg(MI.getOperand(1).getReg())
      .addImm(0);

  MI.eraseFromParent();
  return true;
}

bool AArch64MIPeepholeOpt::visitINSERT(MachineInstr &MI) {
  // Check this INSERT_SUBREG comes from below zero-extend pattern.
  //
  // From %reg = INSERT_SUBREG %reg(tied-def 0), %subreg, subidx
  // To   %reg:subidx = SUBREG_TO_REG 0, %subreg, subidx
  //
  // We're assuming the first operand to INSERT_SUBREG is irrelevant because a
  // COPY would destroy the upper part of the register anyway.
  if (!MI.isRegTiedToDefOperand(1))
    return false;

  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI->getRegClass(DstReg);
  MachineInstr *SrcMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
  if (!SrcMI)
    return false;

  // From https://developer.arm.com/documentation/dui0801/b/BABBGCAC
  //
  // When you use the 32-bit form of an instruction, the upper 32 bits of the
  // source registers are ignored and the upper 32 bits of the destination
  // register are set to zero.
  //
  // If AArch64's 32-bit form of an instruction defines the source operand of
  // the zero-extend, we do not need the zero-extend. Check whether the opcode
  // is a real AArch64 instruction; if it is not, conservatively do not
  // process it.
  if ((SrcMI->getOpcode() <= TargetOpcode::GENERIC_OP_END) ||
      !AArch64::GPR64allRegClass.hasSubClassEq(RC))
    return false;

  // Build a SUBREG_TO_REG instruction.
  MachineInstr *SubregMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(TargetOpcode::SUBREG_TO_REG), DstReg)
          .addImm(0)
          .add(MI.getOperand(2))
          .add(MI.getOperand(3));
  LLVM_DEBUG(dbgs() << MI << " replaced by:\n" << *SubregMI << "\n");
  (void)SubregMI;
  MI.eraseFromParent();

  return true;
}

template <typename T>
static bool splitAddSubImm(T Imm, unsigned RegSize, T &Imm0, T &Imm1) {
  // The immediate must be in the form of ((imm0 << 12) + imm1), in which both
  // imm0 and imm1 are non-zero 12-bit unsigned ints.
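  //
  // For example, 0x123456 = (0x123 << 12) + 0x456, so it splits into
  // Imm0 = 0x123 and Imm1 = 0x456.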
  if ((Imm & 0xfff000) == 0 || (Imm & 0xfff) == 0 ||
      (Imm & ~static_cast<T>(0xffffff)) != 0)
    return false;

  // The immediate cannot be materialized with a single instruction.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Imm, RegSize, Insn);
  if (Insn.size() == 1)
    return false;

  // Split Imm into (Imm0 << 12) + Imm1.
  Imm0 = (Imm >> 12) & 0xfff;
  Imm1 = Imm & 0xfff;
  return true;
}

template <typename T>
bool AArch64MIPeepholeOpt::visitADDSUB(
    unsigned PosOpc, unsigned NegOpc, MachineInstr &MI) {
  // Try below transformation.
  //
  // ADDWrr X, MOVi32imm ==> ADDWri + ADDWri
  // ADDXrr X, MOVi64imm ==> ADDXri + ADDXri
  //
  // SUBWrr X, MOVi32imm ==> SUBWri + SUBWri
  // SUBXrr X, MOVi64imm ==> SUBXri + SUBXri
  //
  // The mov pseudo instruction could be expanded to multiple mov instructions
  // later. Let's try to split the constant operand of the mov instruction into
  // two legal add/sub immediates. It makes only two ADD/SUB instructions
  // instead of multiple `mov` + `add/sub` instructions.

  // We can sometimes have ADDWrr WZR, MOVi32imm that has not been constant
  // folded. Make sure that we don't generate invalid instructions that use XZR
  // in those cases.
  if (MI.getOperand(1).getReg() == AArch64::XZR ||
      MI.getOperand(1).getReg() == AArch64::WZR)
    return false;

  return splitTwoPartImm<T>(
      MI,
      [PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
                       T &Imm1) -> std::optional<OpcodePair> {
        if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
          return std::make_pair(PosOpc, PosOpc);
        if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
          return std::make_pair(NegOpc, NegOpc);
        return std::nullopt;
      },
      [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0,
                   unsigned Imm1, Register SrcReg, Register NewTmpReg,
                   Register NewDstReg) {
        DebugLoc DL = MI.getDebugLoc();
        MachineBasicBlock *MBB = MI.getParent();
        BuildMI(*MBB, MI, DL, TII->get(Opcode.first), NewTmpReg)
            .addReg(SrcReg)
            .addImm(Imm0)
            .addImm(12);
        BuildMI(*MBB, MI, DL, TII->get(Opcode.second), NewDstReg)
            .addReg(NewTmpReg)
            .addImm(Imm1)
            .addImm(0);
      });
}

template <typename T>
bool AArch64MIPeepholeOpt::visitADDSSUBS(
    OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI) {
  // Try the same transformation as ADDSUB, but with the additional
  // requirement that the condition code usages are only for Equal and Not
  // Equal.

  if (MI.getOperand(1).getReg() == AArch64::XZR ||
      MI.getOperand(1).getReg() == AArch64::WZR)
    return false;

  return splitTwoPartImm<T>(
      MI,
      [PosOpcs, NegOpcs, &MI, &TRI = TRI,
       &MRI = MRI](T Imm, unsigned RegSize, T &Imm0,
                   T &Imm1) -> std::optional<OpcodePair> {
        OpcodePair OP;
        if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
          OP = PosOpcs;
        else if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
          OP = NegOpcs;
        else
          return std::nullopt;
        // Check conditional uses last since it is expensive to scan the
        // subsequent instructions.
        MachineInstr &SrcMI = *MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
        std::optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
        if (!NZCVUsed || NZCVUsed->C || NZCVUsed->V)
          return std::nullopt;
        return OP;
      },
      [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0,
                   unsigned Imm1, Register SrcReg, Register NewTmpReg,
                   Register NewDstReg) {
        DebugLoc DL = MI.getDebugLoc();
        MachineBasicBlock *MBB = MI.getParent();
        BuildMI(*MBB, MI, DL, TII->get(Opcode.first), NewTmpReg)
            .addReg(SrcReg)
            .addImm(Imm0)
            .addImm(12);
        BuildMI(*MBB, MI, DL, TII->get(Opcode.second), NewDstReg)
            .addReg(NewTmpReg)
            .addImm(Imm1)
            .addImm(0);
      });
}

// Checks whether the corresponding MOV immediate instruction is applicable for
// this peephole optimization.
bool AArch64MIPeepholeOpt::checkMovImmInstr(MachineInstr &MI,
                                            MachineInstr *&MovMI,
                                            MachineInstr *&SubregToRegMI) {
  // Check whether the current MBB is in a loop and the instruction is loop
  // invariant.
  MachineBasicBlock *MBB = MI.getParent();
  MachineLoop *L = MLI->getLoopFor(MBB);
  if (L && !L->isLoopInvariant(MI))
    return false;

  // Check whether the current MI's operand is a MOV with an immediate.
  MovMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
  if (!MovMI)
    return false;

  // If it is SUBREG_TO_REG, check its operand.
  SubregToRegMI = nullptr;
  if (MovMI->getOpcode() == TargetOpcode::SUBREG_TO_REG) {
    SubregToRegMI = MovMI;
    MovMI = MRI->getUniqueVRegDef(MovMI->getOperand(2).getReg());
    if (!MovMI)
      return false;
  }

  if (MovMI->getOpcode() != AArch64::MOVi32imm &&
      MovMI->getOpcode() != AArch64::MOVi64imm)
    return false;

  // If the MOV has multiple uses, do not split the immediate because it causes
  // more instructions.
  if (!MRI->hasOneUse(MovMI->getOperand(0).getReg()))
    return false;
  if (SubregToRegMI && !MRI->hasOneUse(SubregToRegMI->getOperand(0).getReg()))
    return false;

  // It is OK to perform this peephole optimization.
  return true;
}

template <typename T>
bool AArch64MIPeepholeOpt::splitTwoPartImm(
    MachineInstr &MI,
    SplitAndOpcFunc<T> SplitAndOpc, BuildMIFunc BuildInstr) {
  unsigned RegSize = sizeof(T) * 8;
  assert((RegSize == 32 || RegSize == 64) &&
         "Invalid RegSize for legal immediate peephole optimization");

  // Perform several essential checks against current MI.
  MachineInstr *MovMI, *SubregToRegMI;
  if (!checkMovImmInstr(MI, MovMI, SubregToRegMI))
    return false;

  // Split the immediate to Imm0 and Imm1, and calculate the Opcode.
  T Imm = static_cast<T>(MovMI->getOperand(1).getImm()), Imm0, Imm1;
  // For the 32-bit form of an instruction, the upper 32 bits of the
  // destination register are set to zero. If there is a SUBREG_TO_REG, set the
  // upper 32 bits of Imm to zero. This is essential if the immediate value was
  // a negative number, since it was sign-extended when we assigned it to the
  // 64-bit Imm.
  if (SubregToRegMI)
    Imm &= 0xFFFFFFFF;
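  // For example, a MOVi32imm of -1 is stored sign-extended as
  // 0xffffffffffffffff; masking leaves 0x00000000ffffffff, matching the
  // zeroed upper half of the destination register.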
  OpcodePair Opcode;
  if (auto R = SplitAndOpc(Imm, RegSize, Imm0, Imm1))
    Opcode = *R;
  else
    return false;

  // Create new MIs using the first and second opcodes. Opcodes might differ
  // for flag-setting operations that should only set flags on the second
  // instruction.
  // NewTmpReg = Opcode.first SrcReg Imm0
  // NewDstReg = Opcode.second NewTmpReg Imm1

  // Determine register classes for destinations and register operands.
  MachineFunction *MF = MI.getMF();
  const TargetRegisterClass *FirstInstrDstRC =
      TII->getRegClass(TII->get(Opcode.first), 0, TRI, *MF);
  const TargetRegisterClass *FirstInstrOperandRC =
      TII->getRegClass(TII->get(Opcode.first), 1, TRI, *MF);
  const TargetRegisterClass *SecondInstrDstRC =
      (Opcode.first == Opcode.second)
          ? FirstInstrDstRC
          : TII->getRegClass(TII->get(Opcode.second), 0, TRI, *MF);
  const TargetRegisterClass *SecondInstrOperandRC =
      (Opcode.first == Opcode.second)
          ? FirstInstrOperandRC
          : TII->getRegClass(TII->get(Opcode.second), 1, TRI, *MF);

  // Get the old destination register and create the new ones.
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register NewTmpReg = MRI->createVirtualRegister(FirstInstrDstRC);
  // In the situation that DstReg is not virtual (likely WZR or XZR), we want
  // to reuse that same destination register.
  Register NewDstReg = DstReg.isVirtual()
                           ? MRI->createVirtualRegister(SecondInstrDstRC)
                           : DstReg;

  // Constrain registers based on their new uses.
  MRI->constrainRegClass(SrcReg, FirstInstrOperandRC);
  MRI->constrainRegClass(NewTmpReg, SecondInstrOperandRC);
  if (DstReg != NewDstReg)
    MRI->constrainRegClass(NewDstReg, MRI->getRegClass(DstReg));

  // Call the delegating operation to build the instructions.
  BuildInstr(MI, Opcode, Imm0, Imm1, SrcReg, NewTmpReg, NewDstReg);

  // replaceRegWith changes MI's definition register. Keep it for SSA form
  // until deleting MI, but only if we made a new destination register.
  if (DstReg != NewDstReg) {
    MRI->replaceRegWith(DstReg, NewDstReg);
    MI.getOperand(0).setReg(DstReg);
  }

  // Remove the now-dead instructions.
  MI.eraseFromParent();
  if (SubregToRegMI)
    SubregToRegMI->eraseFromParent();
  MovMI->eraseFromParent();

  return true;
}

bool AArch64MIPeepholeOpt::visitINSviGPR(MachineInstr &MI, unsigned Opc) {
  // Check if this INSvi[X]gpr comes from a COPY of a source FPR128.
  //
  // From
  //   %intermediate1:gpr64 = COPY %src:fpr128
  //   %intermediate2:gpr32 = COPY %intermediate1:gpr64
  //   %dst:fpr128 = INSvi[X]gpr %dst_vec:fpr128, dst_index, %intermediate2:gpr32
  // To
  //   %dst:fpr128 = INSvi[X]lane %dst_vec:fpr128, dst_index, %src:fpr128,
  //                 src_index
  // where src_index = 0, X = [8|16|32|64].

  MachineInstr *SrcMI = MRI->getUniqueVRegDef(MI.getOperand(3).getReg());

  // For a chain of COPY instructions, find the initial source register
  // and check if it's an FPR128.
  while (true) {
    if (!SrcMI || SrcMI->getOpcode() != TargetOpcode::COPY)
      return false;

    if (!SrcMI->getOperand(1).getReg().isVirtual())
      return false;

    if (MRI->getRegClass(SrcMI->getOperand(1).getReg()) ==
        &AArch64::FPR128RegClass) {
      break;
    }
    SrcMI = MRI->getUniqueVRegDef(SrcMI->getOperand(1).getReg());
  }

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = SrcMI->getOperand(1).getReg();
  MachineInstr *INSvilaneMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opc), DstReg)
          .add(MI.getOperand(1))
          .add(MI.getOperand(2))
          .addUse(SrcReg, getRegState(SrcMI->getOperand(1)))
          .addImm(0);

  LLVM_DEBUG(dbgs() << MI << " replaced by:\n" << *INSvilaneMI << "\n");
  (void)INSvilaneMI;
  MI.eraseFromParent();
  return true;
}

// All instructions that set an FPR64 will implicitly zero the top bits of the
// register.
static bool is64bitDefwithZeroHigh64bit(MachineInstr *MI,
                                        MachineRegisterInfo *MRI) {
  if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
    return false;
  const TargetRegisterClass *RC = MRI->getRegClass(MI->getOperand(0).getReg());
  if (RC != &AArch64::FPR64RegClass)
    return false;
  return MI->getOpcode() > TargetOpcode::GENERIC_OP_END;
}

bool AArch64MIPeepholeOpt::visitINSvi64lane(MachineInstr &MI) {
  // Check that the MI defining the low 64 bits implicitly zeroes the high
  // 64 bits. We are expecting the below case.
  //
  //   %1:fpr64 = nofpexcept FCVTNv4i16 %0:fpr128, implicit $fpcr
  //   %6:fpr128 = IMPLICIT_DEF
  //   %5:fpr128 = INSERT_SUBREG %6:fpr128(tied-def 0), killed %1:fpr64, %subreg.dsub
  //   %7:fpr128 = INSvi64lane %5:fpr128(tied-def 0), 1, killed %3:fpr128, 0
  MachineInstr *Low64MI = MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
  if (Low64MI->getOpcode() != AArch64::INSERT_SUBREG)
    return false;
  Low64MI = MRI->getUniqueVRegDef(Low64MI->getOperand(2).getReg());
  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI))
    return false;

  // Check that there is a `mov 0` MI for the high 64 bits.
  // We are expecting the below cases.
  //
  //   %2:fpr64 = MOVID 0
  //   %4:fpr128 = IMPLICIT_DEF
  //   %3:fpr128 = INSERT_SUBREG %4:fpr128(tied-def 0), killed %2:fpr64, %subreg.dsub
  //   %7:fpr128 = INSvi64lane %5:fpr128(tied-def 0), 1, killed %3:fpr128, 0
  // or
  //   %5:fpr128 = MOVIv2d_ns 0
  //   %6:fpr64 = COPY %5.dsub:fpr128
  //   %8:fpr128 = IMPLICIT_DEF
  //   %7:fpr128 = INSERT_SUBREG %8:fpr128(tied-def 0), killed %6:fpr64, %subreg.dsub
  //   %11:fpr128 = INSvi64lane %9:fpr128(tied-def 0), 1, killed %7:fpr128, 0
  MachineInstr *High64MI = MRI->getUniqueVRegDef(MI.getOperand(3).getReg());
  if (!High64MI || High64MI->getOpcode() != AArch64::INSERT_SUBREG)
    return false;
  High64MI = MRI->getUniqueVRegDef(High64MI->getOperand(2).getReg());
  if (High64MI && High64MI->getOpcode() == TargetOpcode::COPY)
    High64MI = MRI->getUniqueVRegDef(High64MI->getOperand(1).getReg());
  if (!High64MI || (High64MI->getOpcode() != AArch64::MOVID &&
                    High64MI->getOpcode() != AArch64::MOVIv2d_ns))
    return false;
  if (High64MI->getOperand(1).getImm() != 0)
    return false;

  // Let's remove the MIs for the high 64 bits.
  Register OldDef = MI.getOperand(0).getReg();
  Register NewDef = MI.getOperand(1).getReg();
  MRI->constrainRegClass(NewDef, MRI->getRegClass(OldDef));
  MRI->replaceRegWith(OldDef, NewDef);
  MI.eraseFromParent();

  return true;
}

bool AArch64MIPeepholeOpt::visitFMOVDr(MachineInstr &MI) {
  // An FMOVDr sets the high 64 bits to zero implicitly, similar to ORR for
  // GPRs, so it is redundant if its input already has zero high bits.
  MachineInstr *Low64MI = MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI))
    return false;

  // Let's remove the redundant FMOVDr.
  Register OldDef = MI.getOperand(0).getReg();
  Register NewDef = MI.getOperand(1).getReg();
  LLVM_DEBUG(dbgs() << "Removing: " << MI << "\n");
  MRI->clearKillFlags(OldDef);
  MRI->clearKillFlags(NewDef);
  MRI->constrainRegClass(NewDef, MRI->getRegClass(OldDef));
  MRI->replaceRegWith(OldDef, NewDef);
  MI.eraseFromParent();

  return true;
}

// Across a basic block we might have an i32 extract from a value that only
// operates on the upper bits (for example a sxtw). We can replace the COPY
// with a new version skipping the sxtw.
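//
// For example:
//   %1:gpr64 = SBFMXri %0:gpr64, 0, 31   ; sxtw
//   %2:gpr32 = COPY %1.sub_32
// can become:
//   %2:gpr32 = COPY %0.sub_32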
bool AArch64MIPeepholeOpt::visitCopy(MachineInstr &MI) {
  Register InputReg = MI.getOperand(1).getReg();
  if (MI.getOperand(1).getSubReg() != AArch64::sub_32 ||
      !MRI->hasOneNonDBGUse(InputReg))
    return false;

  MachineInstr *SrcMI = MRI->getUniqueVRegDef(InputReg);
  SmallPtrSet<MachineInstr *, 4> DeadInstrs;
  DeadInstrs.insert(SrcMI);
  while (SrcMI && SrcMI->isFullCopy() &&
         MRI->hasOneNonDBGUse(SrcMI->getOperand(1).getReg())) {
    SrcMI = MRI->getUniqueVRegDef(SrcMI->getOperand(1).getReg());
    DeadInstrs.insert(SrcMI);
  }

  if (!SrcMI)
    return false;

  // Look for SXTW(X) and return Reg.
  auto getSXTWSrcReg = [](MachineInstr *SrcMI) -> Register {
    if (SrcMI->getOpcode() != AArch64::SBFMXri ||
        SrcMI->getOperand(2).getImm() != 0 ||
        SrcMI->getOperand(3).getImm() != 31)
      return AArch64::NoRegister;
    return SrcMI->getOperand(1).getReg();
  };
  // Look for SUBREG_TO_REG(ORRWrr(WZR, COPY(X.sub_32))).
  auto getUXTWSrcReg = [&](MachineInstr *SrcMI) -> Register {
    if (SrcMI->getOpcode() != AArch64::SUBREG_TO_REG ||
        SrcMI->getOperand(3).getImm() != AArch64::sub_32 ||
        !MRI->hasOneNonDBGUse(SrcMI->getOperand(2).getReg()))
      return AArch64::NoRegister;
    MachineInstr *Orr = MRI->getUniqueVRegDef(SrcMI->getOperand(2).getReg());
    if (!Orr || Orr->getOpcode() != AArch64::ORRWrr ||
        Orr->getOperand(1).getReg() != AArch64::WZR ||
        !MRI->hasOneNonDBGUse(Orr->getOperand(2).getReg()))
      return AArch64::NoRegister;
    MachineInstr *Cpy = MRI->getUniqueVRegDef(Orr->getOperand(2).getReg());
    if (!Cpy || Cpy->getOpcode() != AArch64::COPY ||
        Cpy->getOperand(1).getSubReg() != AArch64::sub_32)
      return AArch64::NoRegister;
    DeadInstrs.insert(Orr);
    return Cpy->getOperand(1).getReg();
  };

  Register SrcReg = getSXTWSrcReg(SrcMI);
  if (!SrcReg)
    SrcReg = getUXTWSrcReg(SrcMI);
  if (!SrcReg)
    return false;

  MRI->constrainRegClass(SrcReg, MRI->getRegClass(InputReg));
  LLVM_DEBUG(dbgs() << "Optimizing: " << MI);
  MI.getOperand(1).setReg(SrcReg);
  LLVM_DEBUG(dbgs() << " to: " << MI);
  for (auto *DeadMI : DeadInstrs) {
    LLVM_DEBUG(dbgs() << " Removing: " << *DeadMI);
    DeadMI->eraseFromParent();
  }
  return true;
}

bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  MRI = &MF.getRegInfo();

  assert(MRI->isSSA() && "Expected to be run on SSA form!");

  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : make_early_inc_range(MBB)) {
      switch (MI.getOpcode()) {
      default:
        break;
      case AArch64::INSERT_SUBREG:
        Changed |= visitINSERT(MI);
        break;
      case AArch64::ANDWrr:
        Changed |= visitAND<uint32_t>(AArch64::ANDWri, MI);
        break;
      case AArch64::ANDXrr:
        Changed |= visitAND<uint64_t>(AArch64::ANDXri, MI);
        break;
      case AArch64::ORRWrs:
        Changed |= visitORR(MI);
        break;
      case AArch64::ADDWrr:
        Changed |= visitADDSUB<uint32_t>(AArch64::ADDWri, AArch64::SUBWri, MI);
        break;
      case AArch64::SUBWrr:
        Changed |= visitADDSUB<uint32_t>(AArch64::SUBWri, AArch64::ADDWri, MI);
        break;
      case AArch64::ADDXrr:
        Changed |= visitADDSUB<uint64_t>(AArch64::ADDXri, AArch64::SUBXri, MI);
        break;
      case AArch64::SUBXrr:
        Changed |= visitADDSUB<uint64_t>(AArch64::SUBXri, AArch64::ADDXri, MI);
        break;
      case AArch64::ADDSWrr:
        Changed |=
            visitADDSSUBS<uint32_t>({AArch64::ADDWri, AArch64::ADDSWri},
                                    {AArch64::SUBWri, AArch64::SUBSWri}, MI);
        break;
      case AArch64::SUBSWrr:
        Changed |=
            visitADDSSUBS<uint32_t>({AArch64::SUBWri, AArch64::SUBSWri},
                                    {AArch64::ADDWri, AArch64::ADDSWri}, MI);
        break;
      case AArch64::ADDSXrr:
        Changed |=
            visitADDSSUBS<uint64_t>({AArch64::ADDXri, AArch64::ADDSXri},
                                    {AArch64::SUBXri, AArch64::SUBSXri}, MI);
        break;
      case AArch64::SUBSXrr:
        Changed |=
            visitADDSSUBS<uint64_t>({AArch64::SUBXri, AArch64::SUBSXri},
                                    {AArch64::ADDXri, AArch64::ADDSXri}, MI);
        break;
      case AArch64::CSELWr:
      case AArch64::CSELXr:
        Changed |= visitCSEL(MI);
        break;
      case AArch64::INSvi64gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi64lane);
        break;
      case AArch64::INSvi32gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi32lane);
        break;
      case AArch64::INSvi16gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi16lane);
        break;
      case AArch64::INSvi8gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi8lane);
        break;
      case AArch64::INSvi64lane:
        Changed |= visitINSvi64lane(MI);
        break;
      case AArch64::FMOVDr:
        Changed |= visitFMOVDr(MI);
        break;
      case AArch64::COPY:
        Changed |= visitCopy(MI);
        break;
      }
    }
  }

  return Changed;
}

FunctionPass *llvm::createAArch64MIPeepholeOptPass() {
  return new AArch64MIPeepholeOpt();
}