LLVM 23.0.0git
LoongArchInstrInfo.cpp
1//=- LoongArchInstrInfo.cpp - LoongArch Instruction Information -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the LoongArch implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "LoongArchInstrInfo.h"
14#include "LoongArch.h"
15#include "LoongArchMachineFunctionInfo.h"
16#include "LoongArchRegisterInfo.h"
17#include "MCTargetDesc/LoongArchMCTargetDesc.h"
18#include "MCTargetDesc/LoongArchMatInt.h"
19#include "llvm/CodeGen/RegisterScavenging.h"
20#include "llvm/CodeGen/StackMaps.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCInstBuilder.h"
23
24using namespace llvm;
25
26#define GET_INSTRINFO_CTOR_DTOR
27#include "LoongArchGenInstrInfo.inc"
28
29LoongArchInstrInfo::LoongArchInstrInfo(const LoongArchSubtarget &STI)
30 : LoongArchGenInstrInfo(STI, RegInfo, LoongArch::ADJCALLSTACKDOWN,
31 LoongArch::ADJCALLSTACKUP),
32 RegInfo(STI.getHwMode()), STI(STI) {}
33
34MCInst LoongArchInstrInfo::getNop() const {
35 return MCInstBuilder(LoongArch::ANDI)
36 .addReg(LoongArch::R0)
37 .addReg(LoongArch::R0)
38 .addImm(0);
39}
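// Note: the NOP built above is `andi $zero, $zero, 0`, which is the canonical
// LoongArch no-op encoding (the `nop` assembler alias).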
40
41void LoongArchInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
42 MachineBasicBlock::iterator MBBI,
43 const DebugLoc &DL, Register DstReg,
44 Register SrcReg, bool KillSrc,
45 bool RenamableDest,
46 bool RenamableSrc) const {
47 if (LoongArch::GPRRegClass.contains(DstReg, SrcReg)) {
48 BuildMI(MBB, MBBI, DL, get(LoongArch::OR), DstReg)
49 .addReg(SrcReg, getKillRegState(KillSrc))
50 .addReg(LoongArch::R0);
51 return;
52 }
53
54 // VR->VR copies.
55 if (LoongArch::LSX128RegClass.contains(DstReg, SrcReg)) {
56 BuildMI(MBB, MBBI, DL, get(LoongArch::VORI_B), DstReg)
57 .addReg(SrcReg, getKillRegState(KillSrc))
58 .addImm(0);
59 return;
60 }
61
62 // XR->XR copies.
63 if (LoongArch::LASX256RegClass.contains(DstReg, SrcReg)) {
64 BuildMI(MBB, MBBI, DL, get(LoongArch::XVORI_B), DstReg)
65 .addReg(SrcReg, getKillRegState(KillSrc))
66 .addImm(0);
67 return;
68 }
69
70 // GPR->CFR copy.
71 if (LoongArch::CFRRegClass.contains(DstReg) &&
72 LoongArch::GPRRegClass.contains(SrcReg)) {
73 BuildMI(MBB, MBBI, DL, get(LoongArch::MOVGR2CF), DstReg)
74 .addReg(SrcReg, getKillRegState(KillSrc));
75 return;
76 }
77 // CFR->GPR copy.
78 if (LoongArch::GPRRegClass.contains(DstReg) &&
79 LoongArch::CFRRegClass.contains(SrcReg)) {
80 BuildMI(MBB, MBBI, DL, get(LoongArch::MOVCF2GR), DstReg)
81 .addReg(SrcReg, getKillRegState(KillSrc));
82 return;
83 }
84 // CFR->CFR copy.
85 if (LoongArch::CFRRegClass.contains(DstReg, SrcReg)) {
86 BuildMI(MBB, MBBI, DL, get(LoongArch::PseudoCopyCFR), DstReg)
87 .addReg(SrcReg, getKillRegState(KillSrc));
88 return;
89 }
90
91 // FPR->FPR copies.
92 unsigned Opc;
93 if (LoongArch::FPR32RegClass.contains(DstReg, SrcReg)) {
94 Opc = LoongArch::FMOV_S;
95 } else if (LoongArch::FPR64RegClass.contains(DstReg, SrcReg)) {
96 Opc = LoongArch::FMOV_D;
97 } else if (LoongArch::GPRRegClass.contains(DstReg) &&
98 LoongArch::FPR32RegClass.contains(SrcReg)) {
99 // FPR32 -> GPR copies
100 Opc = LoongArch::MOVFR2GR_S;
101 } else if (LoongArch::GPRRegClass.contains(DstReg) &&
102 LoongArch::FPR64RegClass.contains(SrcReg)) {
103 // FPR64 -> GPR copies
104 Opc = LoongArch::MOVFR2GR_D;
105 } else {
106 // TODO: support other copies.
107 llvm_unreachable("Impossible reg-to-reg copy");
108 }
109
110 BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
111 .addReg(SrcReg, getKillRegState(KillSrc));
112}
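// Illustrative expansions of the copies handled above (assembly-level sketch):
//   GPR   -> GPR  : or      $dst, $src, $zero
//   LSX   -> LSX  : vori.b  $vd,  $vs,  0
//   LASX  -> LASX : xvori.b $xd,  $xs,  0
//   FPR   -> FPR  : fmov.s / fmov.d
//   FPR   -> GPR  : movfr2gr.s / movfr2gr.d
// CFR copies go through MOVGR2CF / MOVCF2GR or the PseudoCopyCFR pseudo.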
113
114void LoongArchInstrInfo::storeRegToStackSlot(
115 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
116 bool IsKill, int FI, const TargetRegisterClass *RC,
117
118 Register VReg, MachineInstr::MIFlag Flags) const {
119 MachineFunction *MF = MBB.getParent();
120 MachineFrameInfo &MFI = MF->getFrameInfo();
121
122 unsigned Opcode;
123 if (LoongArch::GPRRegClass.hasSubClassEq(RC))
124 Opcode = TRI.getRegSizeInBits(LoongArch::GPRRegClass) == 32
125 ? LoongArch::ST_W
126 : LoongArch::ST_D;
127 else if (LoongArch::FPR32RegClass.hasSubClassEq(RC))
128 Opcode = LoongArch::FST_S;
129 else if (LoongArch::FPR64RegClass.hasSubClassEq(RC))
130 Opcode = LoongArch::FST_D;
131 else if (LoongArch::LSX128RegClass.hasSubClassEq(RC))
132 Opcode = LoongArch::VST;
133 else if (LoongArch::LASX256RegClass.hasSubClassEq(RC))
134 Opcode = LoongArch::XVST;
135 else if (LoongArch::CFRRegClass.hasSubClassEq(RC))
136 Opcode = LoongArch::PseudoST_CFR;
137 else
138 llvm_unreachable("Can't store this register to stack slot");
139
140 MachineMemOperand *MMO = MF->getMachineMemOperand(
141 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
142 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
143
144 BuildMI(MBB, I, DebugLoc(), get(Opcode))
145 .addReg(SrcReg, getKillRegState(IsKill))
146 .addFrameIndex(FI)
147 .addImm(0)
148 .addMemOperand(MMO);
149}
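// The frame-index operand added above is still abstract here; it is rewritten
// into a concrete base register and immediate offset later (e.g. by
// LoongArchRegisterInfo::eliminateFrameIndex during frame index elimination).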
150
151void LoongArchInstrInfo::loadRegFromStackSlot(
152 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DstReg,
153 int FI, const TargetRegisterClass *RC, Register VReg, unsigned SubReg,
154 MachineInstr::MIFlag Flags) const {
155 MachineFunction *MF = MBB.getParent();
156 MachineFrameInfo &MFI = MF->getFrameInfo();
157 DebugLoc DL;
158 if (I != MBB.end())
159 DL = I->getDebugLoc();
160
161 unsigned Opcode;
162 if (LoongArch::GPRRegClass.hasSubClassEq(RC))
163 Opcode = RegInfo.getRegSizeInBits(LoongArch::GPRRegClass) == 32
164 ? LoongArch::LD_W
165 : LoongArch::LD_D;
166 else if (LoongArch::FPR32RegClass.hasSubClassEq(RC))
167 Opcode = LoongArch::FLD_S;
168 else if (LoongArch::FPR64RegClass.hasSubClassEq(RC))
169 Opcode = LoongArch::FLD_D;
170 else if (LoongArch::LSX128RegClass.hasSubClassEq(RC))
171 Opcode = LoongArch::VLD;
172 else if (LoongArch::LASX256RegClass.hasSubClassEq(RC))
173 Opcode = LoongArch::XVLD;
174 else if (LoongArch::CFRRegClass.hasSubClassEq(RC))
175 Opcode = LoongArch::PseudoLD_CFR;
176 else
177 llvm_unreachable("Can't load this register from stack slot");
178
179 MachineMemOperand *MMO = MF->getMachineMemOperand(
180 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
181 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
182
183 BuildMI(MBB, I, DL, get(Opcode), DstReg)
184 .addFrameIndex(FI)
185 .addImm(0)
186 .addMemOperand(MMO);
187}
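// Together with storeRegToStackSlot above, this provides spill/reload support
// for every allocatable register class, including the condition-flag registers
// via the PseudoST_CFR/PseudoLD_CFR pseudos that are expanded in a later pass.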
188
189void LoongArchInstrInfo::movImm(MachineBasicBlock &MBB,
190 MachineBasicBlock::iterator MBBI,
191 const DebugLoc &DL, Register DstReg,
192 uint64_t Val, MachineInstr::MIFlag Flag) const {
193 Register SrcReg = LoongArch::R0;
194
195 if (!STI.is64Bit() && !isInt<32>(Val))
196 report_fatal_error("Should only materialize 32-bit constants for LA32");
197
198 auto Seq = LoongArchMatInt::generateInstSeq(Val);
199 assert(!Seq.empty());
200
201 for (auto &Inst : Seq) {
202 switch (Inst.Opc) {
203 case LoongArch::LU12I_W:
204 BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
205 .addImm(Inst.Imm)
206 .setMIFlag(Flag);
207 break;
208 case LoongArch::ADDI_W:
209 case LoongArch::ORI:
210 case LoongArch::LU32I_D: // "rj" is needed due to InstrInfo pattern
211 case LoongArch::LU52I_D:
212 BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
213 .addReg(SrcReg, RegState::Kill)
214 .addImm(Inst.Imm)
215 .setMIFlag(Flag);
216 break;
217 case LoongArch::BSTRINS_D:
218 BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
219 .addReg(SrcReg, RegState::Kill)
220 .addReg(SrcReg, RegState::Kill)
221 .addImm(Inst.Imm >> 32)
222 .addImm(Inst.Imm & 0xFF)
223 .setMIFlag(Flag);
224 break;
225 default:
226 assert(false && "Unknown insn emitted by LoongArchMatInt");
227 }
228
229 // Only the first instruction has $zero as its source.
230 SrcReg = DstReg;
231 }
232}
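// Example (a sketch, not tied to a particular test): materializing
// Val = 0x12345678 produces
//   lu12i.w $dst, 0x12345        ; bits 31..12
//   ori     $dst, $dst, 0x678    ; bits 11..0
// Wider 64-bit constants additionally use lu32i.d (bits 51..32) and lu52i.d
// (bits 63..52), following the sequence from LoongArchMatInt::generateInstSeq.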
233
234unsigned LoongArchInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
235 unsigned Opcode = MI.getOpcode();
236
237 if (Opcode == TargetOpcode::INLINEASM ||
238 Opcode == TargetOpcode::INLINEASM_BR) {
239 const MachineFunction *MF = MI.getParent()->getParent();
240 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
241 return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
242 }
243
244 unsigned NumBytes = 0;
245 const MCInstrDesc &Desc = MI.getDesc();
246
247 // Size should preferably be set in
248 // llvm/lib/Target/LoongArch/LoongArch*InstrInfo.td (default case).
249 // Specific cases handle instructions of variable sizes.
250 switch (Desc.getOpcode()) {
251 default:
252 return Desc.getSize();
253 case TargetOpcode::STATEPOINT:
254 NumBytes = StatepointOpers(&MI).getNumPatchBytes();
255 assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
256 // No patch bytes means a normal call inst (i.e. `bl`) is emitted.
257 if (NumBytes == 0)
258 NumBytes = 4;
259 break;
260 }
261 return NumBytes;
262}
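// Every native LoongArch instruction is 4 bytes wide, so the TableGen-provided
// Desc.getSize() covers the default case; only inline asm and patchable
// statepoints can occupy a different (multiple-of-4) number of bytes.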
263
264bool LoongArchInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
265 const unsigned Opcode = MI.getOpcode();
266 switch (Opcode) {
267 default:
268 break;
269 case LoongArch::ADDI_D:
270 case LoongArch::ORI:
271 case LoongArch::XORI:
272 return (MI.getOperand(1).isReg() &&
273 MI.getOperand(1).getReg() == LoongArch::R0) ||
274 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
275 }
276 return MI.isAsCheapAsAMove();
277}
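// Example: `ori $dst, $zero, imm` and `addi.d $dst, $src, 0` are effectively
// immediate materializations or register moves, so rematerializing them is no
// more expensive than copying the register.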
278
279MachineBasicBlock *
280LoongArchInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
281 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
282 // The branch target is always the last operand.
283 return MI.getOperand(MI.getNumExplicitOperands() - 1).getMBB();
284}
285
286static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
287 SmallVectorImpl<MachineOperand> &Cond) {
288 // Block ends with fall-through condbranch.
289 assert(LastInst.getDesc().isConditionalBranch() &&
290 "Unknown conditional branch");
291 int NumOp = LastInst.getNumExplicitOperands();
292 Target = LastInst.getOperand(NumOp - 1).getMBB();
293
294 Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
295 for (int i = 0; i < NumOp - 1; i++)
296 Cond.push_back(LastInst.getOperand(i));
297}
298
299bool LoongArchInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
300 MachineBasicBlock *&TBB,
301 MachineBasicBlock *&FBB,
302 SmallVectorImpl<MachineOperand> &Cond,
303 bool AllowModify) const {
304 TBB = FBB = nullptr;
305 Cond.clear();
306
307 // If the block has no terminators, it just falls into the block after it.
308 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
309 if (I == MBB.end() || !isUnpredicatedTerminator(*I))
310 return false;
311
312 // Count the number of terminators and find the first unconditional or
313 // indirect branch.
314 MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
315 int NumTerminators = 0;
316 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
317 J++) {
318 NumTerminators++;
319 if (J->getDesc().isUnconditionalBranch() ||
320 J->getDesc().isIndirectBranch()) {
321 FirstUncondOrIndirectBr = J.getReverse();
322 }
323 }
324
325 // If AllowModify is true, we can erase any terminators after
326 // FirstUncondOrIndirectBR.
327 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
328 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
329 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
330 NumTerminators--;
331 }
332 I = FirstUncondOrIndirectBr;
333 }
334
335 // Handle a single unconditional branch.
336 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
337 TBB = getBranchDestBlock(*I);
338 return false;
339 }
340
341 // Handle a single conditional branch.
342 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
343 parseCondBranch(*I, TBB, Cond);
344 return false;
345 }
346
347 // Handle a conditional branch followed by an unconditional branch.
348 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
349 I->getDesc().isUnconditionalBranch()) {
350 parseCondBranch(*std::prev(I), TBB, Cond);
351 FBB = getBranchDestBlock(*I);
352 return false;
353 }
354
355 // Otherwise, we can't handle this.
356 return true;
357}
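// A sketch of the block shapes analyzeBranch accepts:
//   (1) ...; b %bb.t                      -> TBB = t
//   (2) ...; beq $a0, $a1, %bb.t          -> TBB = t, Cond = {BEQ, $a0, $a1}
//   (3) ...; beq $a0, $a1, %bb.t; b %bb.f -> TBB = t, FBB = f, Cond as above
// Anything else (e.g. an indirect terminator) makes it return true, meaning
// "cannot analyze"; Cond stores the opcode as an immediate followed by the
// branch's register operands.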
358
359bool LoongArchInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
360 int64_t BrOffset) const {
361 switch (BranchOp) {
362 default:
363 llvm_unreachable("Unknown branch instruction!");
364 case LoongArch::BEQ:
365 case LoongArch::BNE:
366 case LoongArch::BLT:
367 case LoongArch::BGE:
368 case LoongArch::BLTU:
369 case LoongArch::BGEU:
370 return isInt<18>(BrOffset);
371 case LoongArch::BEQZ:
372 case LoongArch::BNEZ:
373 case LoongArch::BCEQZ:
374 case LoongArch::BCNEZ:
375 return isInt<23>(BrOffset);
376 case LoongArch::B:
377 case LoongArch::PseudoBR:
378 return isInt<28>(BrOffset);
379 }
380}
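// These ranges follow the branch encodings: BEQ/BNE/BLT/BGE/BLTU/BGEU carry a
// 16-bit offset, BEQZ/BNEZ/BCEQZ/BCNEZ a 21-bit offset, and B a 26-bit offset,
// all counted in 4-byte units -- hence isInt<18>, isInt<23> and isInt<28> on
// the byte offset.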
381
382bool LoongArchInstrInfo::isSafeToMove(const MachineInstr &MI,
383 const MachineBasicBlock *MBB,
384 const MachineFunction &MF) const {
385 auto MII = MI.getIterator();
386 auto MIE = MBB->end();
387
388 // According to psABI v2.30:
389 //
390 // https://github.com/loongson/la-abi-specs/releases/tag/v2.30
391 //
392 // The following instruction patterns are prohibited from being reordered:
393 //
394 // * pcalau12i $a0, %pc_hi20(s)
395 // addi.d $a1, $zero, %pc_lo12(s)
396 // lu32i.d $a1, %pc64_lo20(s)
397 // lu52i.d $a1, $a1, %pc64_hi12(s)
398 //
399 // * pcalau12i $a0, %got_pc_hi20(s) | %ld_pc_hi20(s) | %gd_pc_hi20(s)
400 // addi.d $a1, $zero, %got_pc_lo12(s)
401 // lu32i.d $a1, %got64_pc_lo20(s)
402 // lu52i.d $a1, $a1, %got64_pc_hi12(s)
403 //
404 // * pcalau12i $a0, %ie_pc_hi20(s)
405 // addi.d $a1, $zero, %ie_pc_lo12(s)
406 // lu32i.d $a1, %ie64_pc_lo20(s)
407 // lu52i.d $a1, $a1, %ie64_pc_hi12(s)
408 //
409 // * pcalau12i $a0, %desc_pc_hi20(s)
410 // addi.d $a1, $zero, %desc_pc_lo12(s)
411 // lu32i.d $a1, %desc64_pc_lo20(s)
412 // lu52i.d $a1, $a1, %desc64_pc_hi12(s)
413 //
414 // For simplicity, only pcalau12i and lu52i.d are marked as scheduling
415 // boundaries, and the instructions between them are guaranteed to be
416 // ordered according to data dependencies.
417 switch (MI.getOpcode()) {
418 case LoongArch::PCALAU12I: {
419 auto AddI = std::next(MII);
420 if (AddI == MIE || AddI->getOpcode() != LoongArch::ADDI_D)
421 break;
422 auto Lu32I = std::next(AddI);
423 if (Lu32I == MIE || Lu32I->getOpcode() != LoongArch::LU32I_D)
424 break;
425 auto MO0 = MI.getOperand(1).getTargetFlags();
426 auto MO1 = AddI->getOperand(2).getTargetFlags();
427 auto MO2 = Lu32I->getOperand(2).getTargetFlags();
428 if (MO0 == LoongArchII::MO_PCREL_HI && MO1 == LoongArchII::MO_PCREL_LO &&
429 MO2 == LoongArchII::MO_PCREL64_LO)
430 return false;
431 if ((MO0 == LoongArchII::MO_GOT_PC_HI || MO0 == LoongArchII::MO_LD_PC_HI ||
432 MO0 == LoongArchII::MO_GD_PC_HI) &&
433 MO1 == LoongArchII::MO_GOT_PC_LO && MO2 == LoongArchII::MO_GOT_PC64_LO)
434 return false;
435 if (MO0 == LoongArchII::MO_IE_PC_HI && MO1 == LoongArchII::MO_IE_PC_LO &&
436 MO2 == LoongArchII::MO_IE_PC64_LO)
437 return false;
438 if (MO0 == LoongArchII::MO_DESC_PC_HI &&
439 MO1 == LoongArchII::MO_DESC_PC_LO &&
440 MO2 == LoongArchII::MO_DESC64_PC_LO)
441 return false;
442 break;
443 }
444 case LoongArch::LU52I_D: {
445 auto MO = MI.getOperand(2).getTargetFlags();
446 if (MO == LoongArchII::MO_PCREL64_HI || MO == LoongArchII::MO_GOT_PC64_HI ||
447 MO == LoongArchII::MO_IE_PC64_HI || MO == LoongArchII::MO_DESC64_PC_HI)
448 return false;
449 break;
450 }
451 default:
452 break;
453 }
454
455 const auto &STI = MF.getSubtarget<LoongArchSubtarget>();
456 if (STI.hasFeature(LoongArch::FeatureRelax)) {
457 // When linker relaxation is enabled, the following instruction patterns are
458 // prohibited from being reordered:
459 //
460 // * pcalau12i $a0, %pc_hi20(s)
461 // addi.w/d $a0, $a0, %pc_lo12(s)
462 //
463 // * pcalau12i $a0, %got_pc_hi20(s)
464 // ld.w/d $a0, $a0, %got_pc_lo12(s)
465 //
466 // * pcalau12i $a0, %ld_pc_hi20(s) | %gd_pc_hi20(s)
467 // addi.w/d $a0, $a0, %got_pc_lo12(s)
468 //
469 // * pcalau12i $a0, %desc_pc_hi20(s)
470 // addi.w/d $a0, $a0, %desc_pc_lo12(s)
471 // ld.w/d $ra, $a0, %desc_ld(s)
472 // jirl $ra, $ra, %desc_call(s)
473 unsigned AddiOp = STI.is64Bit() ? LoongArch::ADDI_D : LoongArch::ADDI_W;
474 unsigned LdOp = STI.is64Bit() ? LoongArch::LD_D : LoongArch::LD_W;
475 switch (MI.getOpcode()) {
476 case LoongArch::PCALAU12I: {
477 auto MO0 = LoongArchII::getDirectFlags(MI.getOperand(1));
478 auto SecondOp = std::next(MII);
479 if (MO0 == LoongArchII::MO_DESC_PC_HI) {
480 if (SecondOp == MIE || SecondOp->getOpcode() != AddiOp)
481 break;
482 auto Ld = std::next(SecondOp);
483 if (Ld == MIE || Ld->getOpcode() != LdOp)
484 break;
485 auto MO1 = LoongArchII::getDirectFlags(SecondOp->getOperand(2));
486 auto MO2 = LoongArchII::getDirectFlags(Ld->getOperand(2));
487 if (MO1 == LoongArchII::MO_DESC_PC_LO && MO2 == LoongArchII::MO_DESC_LD)
488 return false;
489 break;
490 }
491 if (SecondOp == MIE ||
492 (SecondOp->getOpcode() != AddiOp && SecondOp->getOpcode() != LdOp))
493 break;
494 auto MO1 = LoongArchII::getDirectFlags(SecondOp->getOperand(2));
495 if (MO0 == LoongArchII::MO_PCREL_HI && SecondOp->getOpcode() == AddiOp &&
496 MO1 == LoongArchII::MO_PCREL_LO)
497 return false;
498 if (MO0 == LoongArchII::MO_GOT_PC_HI && SecondOp->getOpcode() == LdOp &&
499 MO1 == LoongArchII::MO_GOT_PC_LO)
500 return false;
501 if ((MO0 == LoongArchII::MO_LD_PC_HI ||
502 MO0 == LoongArchII::MO_GD_PC_HI) &&
503 SecondOp->getOpcode() == AddiOp && MO1 == LoongArchII::MO_GOT_PC_LO)
504 return false;
505 break;
506 }
507 case LoongArch::ADDI_W:
508 case LoongArch::ADDI_D: {
509 auto MO = LoongArchII::getDirectFlags(MI.getOperand(2));
510 if (MO == LoongArchII::MO_PCREL_LO || MO == LoongArchII::MO_DESC_PC_LO)
511 return false;
512 break;
513 }
514 case LoongArch::LD_W:
515 case LoongArch::LD_D: {
516 auto MO = LoongArchII::getDirectFlags(MI.getOperand(2));
517 if (MO == LoongArchII::MO_GOT_PC_LO || MO == LoongArchII::MO_DESC_LD)
518 return false;
519 break;
520 }
521 case LoongArch::PseudoDESC_CALL: {
522 auto MO = LoongArchII::getDirectFlags(MI.getOperand(2));
523 if (MO == LoongArchII::MO_DESC_CALL)
524 return false;
525 break;
526 }
527 default:
528 break;
529 }
530 }
531
532 return true;
533}
534
535bool LoongArchInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
536 const MachineBasicBlock *MBB,
537 const MachineFunction &MF) const {
538 if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
539 return true;
540
541 if (!isSafeToMove(MI, MBB, MF))
542 return true;
543
544 return false;
545}
546
547unsigned LoongArchInstrInfo::removeBranch(MachineBasicBlock &MBB,
548 int *BytesRemoved) const {
549 if (BytesRemoved)
550 *BytesRemoved = 0;
551 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
552 if (I == MBB.end())
553 return 0;
554
555 if (!I->getDesc().isBranch())
556 return 0;
557
558 // Remove the branch.
559 if (BytesRemoved)
560 *BytesRemoved += getInstSizeInBytes(*I);
561 I->eraseFromParent();
562
563 I = MBB.end();
564
565 if (I == MBB.begin())
566 return 1;
567 --I;
568 if (!I->getDesc().isConditionalBranch())
569 return 1;
570
571 // Remove the branch.
572 if (BytesRemoved)
573 *BytesRemoved += getInstSizeInBytes(*I);
574 I->eraseFromParent();
575 return 2;
576}
577
578// Inserts a branch into the end of the specific MachineBasicBlock, returning
579// the number of instructions inserted.
580unsigned LoongArchInstrInfo::insertBranch(
581 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
582 ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
583 if (BytesAdded)
584 *BytesAdded = 0;
585
586 // Shouldn't be a fall through.
587 assert(TBB && "insertBranch must not be told to insert a fallthrough");
588 assert(Cond.size() <= 3 && Cond.size() != 1 &&
589 "LoongArch branch conditions have at most two components!");
590
591 // Unconditional branch.
592 if (Cond.empty()) {
593 MachineInstr &MI = *BuildMI(&MBB, DL, get(LoongArch::PseudoBR)).addMBB(TBB);
594 if (BytesAdded)
595 *BytesAdded += getInstSizeInBytes(MI);
596 return 1;
597 }
598
599 // Either a one or two-way conditional branch.
600 MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
601 for (unsigned i = 1; i < Cond.size(); ++i)
602 MIB.add(Cond[i]);
603 MIB.addMBB(TBB);
604 if (BytesAdded)
605 *BytesAdded += getInstSizeInBytes(*MIB);
606
607 // One-way conditional branch.
608 if (!FBB)
609 return 1;
610
611 // Two-way conditional branch.
612 MachineInstr &MI = *BuildMI(&MBB, DL, get(LoongArch::PseudoBR)).addMBB(FBB);
613 if (BytesAdded)
614 *BytesAdded += getInstSizeInBytes(MI);
615 return 2;
616}
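// Example: with Cond = {BLT, $a0, $a1} and both TBB and FBB set, the code
// above emits
//   blt $a0, $a1, %bb.TBB
//   b   %bb.FBB              ; PseudoBR, relaxed later if out of range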
617
618void LoongArchInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
619 MachineBasicBlock &DestBB,
620 MachineBasicBlock &RestoreBB,
621 const DebugLoc &DL,
622 int64_t BrOffset,
623 RegScavenger *RS) const {
624 assert(RS && "RegScavenger required for long branching");
625 assert(MBB.empty() &&
626 "new block should be inserted for expanding unconditional branch");
627 assert(MBB.pred_size() == 1);
628
629 MachineFunction *MF = MBB.getParent();
630 MachineRegisterInfo &MRI = MF->getRegInfo();
631 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
632 LoongArchMachineFunctionInfo *LAFI =
633 MF->getInfo<LoongArchMachineFunctionInfo>();
634 bool Has32S = STI.hasFeature(LoongArch::Feature32S);
635
636 if (!isInt<32>(BrOffset))
638 "Branch offsets outside of the signed 32-bit range not supported");
639
640 Register ScratchReg = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
641 MachineInstr *PCAI = nullptr;
642 MachineInstr *ADDI = nullptr;
643 auto II = MBB.end();
644 unsigned ADDIOp = STI.is64Bit() ? LoongArch::ADDI_D : LoongArch::ADDI_W;
645
646 if (Has32S) {
647 PCAI = BuildMI(MBB, II, DL, get(LoongArch::PCALAU12I), ScratchReg)
648 .addMBB(&DestBB, LoongArchII::MO_PCREL_HI);
649 ADDI = BuildMI(MBB, II, DL, get(ADDIOp), ScratchReg)
650 .addReg(ScratchReg)
651 .addMBB(&DestBB, LoongArchII::MO_PCREL_LO);
652 } else {
653 MCSymbol *PCAddSymbol = MF->getContext().createNamedTempSymbol("pcadd_hi");
654 PCAI = BuildMI(MBB, II, DL, get(LoongArch::PCADDU12I), ScratchReg)
655 .addMBB(&DestBB, LoongArchII::MO_PCADD_HI);
656 PCAI->setPreInstrSymbol(*MF, PCAddSymbol);
657 ADDI = BuildMI(MBB, II, DL, get(ADDIOp), ScratchReg)
658 .addReg(ScratchReg)
659 .addSym(PCAddSymbol, LoongArchII::MO_PCADD_LO);
660 }
661 BuildMI(MBB, II, DL, get(LoongArch::PseudoBRIND))
662 .addReg(ScratchReg, RegState::Kill)
663 .addImm(0);
664
665 RS->enterBasicBlockEnd(MBB);
666 Register Scav = RS->scavengeRegisterBackwards(
667 LoongArch::GPRRegClass, PCAI->getIterator(), /*RestoreAfter=*/false,
668 /*SPAdj=*/0, /*AllowSpill=*/false);
669 if (Scav != LoongArch::NoRegister)
670 RS->setRegUsed(Scav);
671 else {
672 // If no register can be scavenged, fall back to a fixed register and spill
673 // it around the sequence. Use $t8 (R20) because it is rarely live here.
674 Scav = LoongArch::R20;
675 int FrameIndex = LAFI->getBranchRelaxationSpillFrameIndex();
676 if (FrameIndex == -1)
677 report_fatal_error("The function size is incorrectly estimated.");
678 storeRegToStackSlot(MBB, PCAI, Scav, /*IsKill=*/true, FrameIndex,
679 &LoongArch::GPRRegClass, Register());
680 TRI->eliminateFrameIndex(std::prev(PCAI->getIterator()),
681 /*SpAdj=*/0, /*FIOperandNum=*/1);
682 PCAI->getOperand(1).setMBB(&RestoreBB);
683 if (Has32S)
684 ADDI->getOperand(2).setMBB(&RestoreBB);
685 loadRegFromStackSlot(RestoreBB, RestoreBB.end(), Scav, FrameIndex,
686 &LoongArch::GPRRegClass, Register());
687 TRI->eliminateFrameIndex(RestoreBB.back(),
688 /*SpAdj=*/0, /*FIOperandNum=*/1);
689 }
690 MRI.replaceRegWith(ScratchReg, Scav);
691 MRI.clearVirtRegs();
692}
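// A sketch of the long-branch sequence built above (Has32S case, LA64):
//   pcalau12i $scratch, %pc_hi20(dest)
//   addi.d    $scratch, $scratch, %pc_lo12(dest)
//   jr        $scratch                       ; PseudoBRIND
// Without 32S a pcaddu12i/addi pair anchored on a local "pcadd_hi" temp symbol
// is used instead. If no scratch register can be scavenged, $t8 (R20) is
// spilled to the branch-relaxation frame slot around the sequence.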
693
694static unsigned getOppositeBranchOpc(unsigned Opc) {
695 switch (Opc) {
696 default:
697 llvm_unreachable("Unrecognized conditional branch");
698 case LoongArch::BEQ:
699 return LoongArch::BNE;
700 case LoongArch::BNE:
701 return LoongArch::BEQ;
702 case LoongArch::BEQZ:
703 return LoongArch::BNEZ;
704 case LoongArch::BNEZ:
705 return LoongArch::BEQZ;
706 case LoongArch::BCEQZ:
707 return LoongArch::BCNEZ;
708 case LoongArch::BCNEZ:
709 return LoongArch::BCEQZ;
710 case LoongArch::BLT:
711 return LoongArch::BGE;
712 case LoongArch::BGE:
713 return LoongArch::BLT;
714 case LoongArch::BLTU:
715 return LoongArch::BGEU;
716 case LoongArch::BGEU:
717 return LoongArch::BLTU;
718 }
719}
720
721bool LoongArchInstrInfo::reverseBranchCondition(
722 SmallVectorImpl<MachineOperand> &Cond) const {
723 assert((Cond.size() && Cond.size() <= 3) && "Invalid branch condition!");
724 Cond[0].setImm(getOppositeBranchOpc(Cond[0].getImm()));
725 return false;
726}
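// Example: reversing Cond = {BLT, $a0, $a1} yields {BGE, $a0, $a1}; only the
// opcode immediate in Cond[0] changes, the register operands stay in place.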
727
728std::pair<unsigned, unsigned>
729LoongArchInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
730 const unsigned Mask = LoongArchII::MO_DIRECT_FLAG_MASK;
731 return std::make_pair(TF & Mask, TF & ~Mask);
732}
733
734ArrayRef<std::pair<unsigned, const char *>>
735LoongArchInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
736 using namespace LoongArchII;
737 // TODO: Add more target flags.
738 static const std::pair<unsigned, const char *> TargetFlags[] = {
739 {MO_CALL, "loongarch-call"},
740 {MO_CALL_PLT, "loongarch-call-plt"},
741 {MO_PCREL_HI, "loongarch-pcrel-hi"},
742 {MO_PCREL_LO, "loongarch-pcrel-lo"},
743 {MO_PCREL64_LO, "loongarch-pcrel64-lo"},
744 {MO_PCREL64_HI, "loongarch-pcrel64-hi"},
745 {MO_GOT_PC_HI, "loongarch-got-pc-hi"},
746 {MO_GOT_PC_LO, "loongarch-got-pc-lo"},
747 {MO_GOT_PC64_LO, "loongarch-got-pc64-lo"},
748 {MO_GOT_PC64_HI, "loongarch-got-pc64-hi"},
749 {MO_LE_HI, "loongarch-le-hi"},
750 {MO_LE_LO, "loongarch-le-lo"},
751 {MO_LE64_LO, "loongarch-le64-lo"},
752 {MO_LE64_HI, "loongarch-le64-hi"},
753 {MO_IE_PC_HI, "loongarch-ie-pc-hi"},
754 {MO_IE_PC_LO, "loongarch-ie-pc-lo"},
755 {MO_IE_PC64_LO, "loongarch-ie-pc64-lo"},
756 {MO_IE_PC64_HI, "loongarch-ie-pc64-hi"},
757 {MO_LD_PC_HI, "loongarch-ld-pc-hi"},
758 {MO_GD_PC_HI, "loongarch-gd-pc-hi"},
759 {MO_CALL30, "loongarch-call30"},
760 {MO_CALL36, "loongarch-call36"},
761 {MO_DESC_PC_HI, "loongarch-desc-pc-hi"},
762 {MO_DESC_PC_LO, "loongarch-desc-pc-lo"},
763 {MO_DESC64_PC_LO, "loongarch-desc64-pc-lo"},
764 {MO_DESC64_PC_HI, "loongarch-desc64-pc-hi"},
765 {MO_DESC_LD, "loongarch-desc-ld"},
766 {MO_DESC_CALL, "loongarch-desc-call"},
767 {MO_LE_HI_R, "loongarch-le-hi-r"},
768 {MO_LE_ADD_R, "loongarch-le-add-r"},
769 {MO_LE_LO_R, "loongarch-le-lo-r"},
770 {MO_PCADD_HI, "loongarch-pcadd-hi"},
771 {MO_PCADD_LO, "loongarch-pcadd-lo"},
772 {MO_GOT_PCADD_HI, "loongarch-got-pcadd-hi"},
773 {MO_GOT_PCADD_LO, "loongarch-got-pcadd-lo"},
774 {MO_IE_PCADD_HI, "loongarch-ie-pcadd-hi"},
775 {MO_IE_PCADD_LO, "loongarch-ie-pcadd-lo"},
776 {MO_LD_PCADD_HI, "loongarch-ld-pcadd-hi"},
777 {MO_LD_PCADD_LO, "loongarch-ld-pcadd-lo"},
778 {MO_GD_PCADD_HI, "loongarch-gd-pcadd-hi"},
779 {MO_GD_PCADD_LO, "loongarch-gd-pcadd-lo"},
780 {MO_DESC_PCADD_HI, "loongarch-pcadd-desc-hi"},
781 {MO_DESC_PCADD_LO, "loongarch-pcadd-desc-lo"}};
782 return ArrayRef(TargetFlags);
783}
784
785ArrayRef<std::pair<unsigned, const char *>>
786LoongArchInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
787 using namespace LoongArchII;
788 static const std::pair<unsigned, const char *> TargetFlags[] = {
789 {MO_RELAX, "loongarch-relax"}};
790 return ArrayRef(TargetFlags);
791}
792
793bool LoongArchInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
794 Register Reg,
795 const MachineInstr &AddrI,
796 ExtAddrMode &AM) const {
797 enum MemIOffsetType {
798 Imm14Shift2,
799 Imm12,
800 Imm11Shift1,
801 Imm10Shift2,
802 Imm9Shift3,
803 Imm8,
804 Imm8Shift1,
805 Imm8Shift2,
806 Imm8Shift3
807 };
808
809 MemIOffsetType OT;
810 switch (MemI.getOpcode()) {
811 default:
812 return false;
813 case LoongArch::LDPTR_W:
814 case LoongArch::LDPTR_D:
815 case LoongArch::STPTR_W:
816 case LoongArch::STPTR_D:
817 OT = Imm14Shift2;
818 break;
819 case LoongArch::LD_B:
820 case LoongArch::LD_H:
821 case LoongArch::LD_W:
822 case LoongArch::LD_D:
823 case LoongArch::LD_BU:
824 case LoongArch::LD_HU:
825 case LoongArch::LD_WU:
826 case LoongArch::ST_B:
827 case LoongArch::ST_H:
828 case LoongArch::ST_W:
829 case LoongArch::ST_D:
830 case LoongArch::FLD_S:
831 case LoongArch::FLD_D:
832 case LoongArch::FST_S:
833 case LoongArch::FST_D:
834 case LoongArch::VLD:
835 case LoongArch::VST:
836 case LoongArch::XVLD:
837 case LoongArch::XVST:
838 case LoongArch::VLDREPL_B:
839 case LoongArch::XVLDREPL_B:
840 OT = Imm12;
841 break;
842 case LoongArch::VLDREPL_H:
843 case LoongArch::XVLDREPL_H:
844 OT = Imm11Shift1;
845 break;
846 case LoongArch::VLDREPL_W:
847 case LoongArch::XVLDREPL_W:
848 OT = Imm10Shift2;
849 break;
850 case LoongArch::VLDREPL_D:
851 case LoongArch::XVLDREPL_D:
852 OT = Imm9Shift3;
853 break;
854 case LoongArch::VSTELM_B:
855 case LoongArch::XVSTELM_B:
856 OT = Imm8;
857 break;
858 case LoongArch::VSTELM_H:
859 case LoongArch::XVSTELM_H:
860 OT = Imm8Shift1;
861 break;
862 case LoongArch::VSTELM_W:
863 case LoongArch::XVSTELM_W:
864 OT = Imm8Shift2;
865 break;
866 case LoongArch::VSTELM_D:
867 case LoongArch::XVSTELM_D:
868 OT = Imm8Shift3;
869 break;
870 }
871
872 if (MemI.getOperand(0).getReg() == Reg)
873 return false;
874
875 if ((AddrI.getOpcode() != LoongArch::ADDI_W &&
876 AddrI.getOpcode() != LoongArch::ADDI_D) ||
877 !AddrI.getOperand(1).isReg() || !AddrI.getOperand(2).isImm())
878 return false;
879
880 int64_t OldOffset = MemI.getOperand(2).getImm();
881 int64_t Disp = AddrI.getOperand(2).getImm();
882 int64_t NewOffset = OldOffset + Disp;
883 if (!STI.is64Bit())
884 NewOffset = SignExtend64<32>(NewOffset);
885
886 if (!(OT == Imm14Shift2 && isShiftedInt<14, 2>(NewOffset) && STI.hasUAL()) &&
887 !(OT == Imm12 && isInt<12>(NewOffset)) &&
888 !(OT == Imm11Shift1 && isShiftedInt<11, 1>(NewOffset)) &&
889 !(OT == Imm10Shift2 && isShiftedInt<10, 2>(NewOffset)) &&
890 !(OT == Imm9Shift3 && isShiftedInt<9, 3>(NewOffset)) &&
891 !(OT == Imm8 && isInt<8>(NewOffset)) &&
892 !(OT == Imm8Shift1 && isShiftedInt<8, 1>(NewOffset)) &&
893 !(OT == Imm8Shift2 && isShiftedInt<8, 2>(NewOffset)) &&
894 !(OT == Imm8Shift3 && isShiftedInt<8, 3>(NewOffset)))
895 return false;
896
897 AM.BaseReg = AddrI.getOperand(1).getReg();
898 AM.ScaledReg = 0;
899 AM.Scale = 0;
900 AM.Displacement = NewOffset;
901 AM.Form = ExtAddrMode::Formula::Basic;
902 return true;
903}
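// Example fold (a sketch): given
//   $a1 = ADDI_D $a0, 100
//   $a2 = LD_D   $a1, 8
// the load can instead address [$a0 + 108], since 108 still fits the plain
// signed 12-bit immediate form that the checks above require for LD_D.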
904
905MachineInstr *
906LoongArchInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
907 const ExtAddrMode &AM) const {
908 const DebugLoc &DL = MemI.getDebugLoc();
909 MachineBasicBlock &MBB = *MemI.getParent();
910
911 assert(AM.ScaledReg == 0 && AM.Scale == 0 &&
912 "Addressing mode not supported for folding");
913
914 unsigned MemIOp = MemI.getOpcode();
915 switch (MemIOp) {
916 default:
917 return BuildMI(MBB, MemI, DL, get(MemIOp))
918 .addReg(MemI.getOperand(0).getReg(),
919 MemI.mayLoad() ? RegState::Define : 0)
920 .addReg(AM.BaseReg)
921 .addImm(AM.Displacement)
922 .setMemRefs(MemI.memoperands())
923 .setMIFlags(MemI.getFlags());
924 case LoongArch::VSTELM_B:
925 case LoongArch::VSTELM_H:
926 case LoongArch::VSTELM_W:
927 case LoongArch::VSTELM_D:
928 case LoongArch::XVSTELM_B:
929 case LoongArch::XVSTELM_H:
930 case LoongArch::XVSTELM_W:
931 case LoongArch::XVSTELM_D:
932 return BuildMI(MBB, MemI, DL, get(MemIOp))
933 .addReg(MemI.getOperand(0).getReg(), 0)
934 .addReg(AM.BaseReg)
935 .addImm(AM.Displacement)
936 .addImm(MemI.getOperand(3).getImm())
937 .setMemRefs(MemI.memoperands())
938 .setMIFlags(MemI.getFlags());
939 }
940}
941
942// Returns true if this is the sext.w pattern, addi.w rd, rs, 0.
943bool LoongArch::isSEXT_W(const MachineInstr &MI) {
944 return MI.getOpcode() == LoongArch::ADDI_W && MI.getOperand(1).isReg() &&
945 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
946}
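// i.e. `addi.w $rd, $rs, 0`, which on LA64 sign-extends the low 32 bits of
// $rs into $rd.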