LLVM  6.0.0svn
HexagonInstrInfo.cpp
Go to the documentation of this file.
1 //===- HexagonInstrInfo.cpp - Hexagon Instruction Information -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "HexagonInstrInfo.h"
15 #include "Hexagon.h"
16 #include "HexagonFrameLowering.h"
18 #include "HexagonRegisterInfo.h"
19 #include "HexagonSubtarget.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringRef.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/MC/MCAsmInfo.h"
45 #include "llvm/MC/MCInstrDesc.h"
47 #include "llvm/MC/MCRegisterInfo.h"
50 #include "llvm/Support/Debug.h"
55 #include <cassert>
56 #include <cctype>
57 #include <cstdint>
58 #include <cstring>
59 #include <iterator>
60 #include <string>
61 #include <utility>
62 
63 using namespace llvm;
64 
65 #define DEBUG_TYPE "hexagon-instrinfo"
66 
67 #define GET_INSTRINFO_CTOR_DTOR
68 #define GET_INSTRMAP_INFO
70 #include "HexagonGenDFAPacketizer.inc"
71 #include "HexagonGenInstrInfo.inc"
72 
// Command-line knobs controlling Hexagon instruction scheduling,
// packetization, forwarding, and hazard-recognition behavior.
// NOTE(review): this dump dropped the leading "static cl::opt<bool> Name("
// line of several declarations below (original lines 81, 84, 88, 92, 100);
// the remaining tokens are kept byte-identical.
73 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
74  cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
75  "packetization boundary."));
76 
77 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
78  cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
79 
80 static cl::opt<bool> DisableNVSchedule("disable-hexagon-nv-schedule",
82  cl::desc("Disable schedule adjustment for new value stores."));
83 
85  "enable-timing-class-latency", cl::Hidden, cl::init(false),
86  cl::desc("Enable timing class latency"));
87 
89  "enable-alu-forwarding", cl::Hidden, cl::init(true),
90  cl::desc("Enable vec alu forwarding"));
91 
93  "enable-acc-forwarding", cl::Hidden, cl::init(true),
94  cl::desc("Enable vec acc forwarding"));
95 
96 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
97  cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"));
98 
99 static cl::opt<bool> UseDFAHazardRec("dfa-hazard-rec",
101  cl::desc("Use the DFA based hazard recognizer."));
102 
/// Constants for Hexagon instructions.
/// Signed immediate-offset bounds for base+offset memory accesses, by access
/// size (B = byte, H = halfword, W = word, D = doubleword), plus the range of
/// the add-immediate (ADDI) operand. MAX/MIN pairs are the inclusive bounds
/// of the encodable signed field.
/// (Restored as clean definitions; the export had line numbers fused into
/// the code.)
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
const int Hexagon_MEMB_OFFSET_MIN = -1024;
const int Hexagon_ADDI_OFFSET_MAX = 32767;
const int Hexagon_ADDI_OFFSET_MIN = -32768;
114 
115 // Pin the vtable to this file.
// Defining one out-of-line virtual method forces the compiler to emit the
// vtable in this translation unit instead of in every user of the class.
116 void HexagonInstrInfo::anchor() {}
117 
// Constructor: tells the generated base class which pseudo opcodes mark
// call-frame setup/teardown, and records the owning subtarget.
// NOTE(review): the declaration line (original line 118,
// "HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)") is missing
// from this dump.
119  : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
120  Subtarget(ST) {}
121 
122 static bool isIntRegForSubInst(unsigned Reg) {
123  return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
124  (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
125 }
126 
127 static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) {
128  return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
129  isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
130 }
131 
132 /// Calculate number of instructions excluding the debug instructions.
// NOTE(review): the function signature (original lines 133-134) is missing
// from this dump; the body iterates an instruction range [MIB, MIE).
135  unsigned Count = 0;
136  for (; MIB != MIE; ++MIB) {
// DBG_VALUE instructions generate no code, so they are excluded.
137  if (!MIB->isDebugValue())
138  ++Count;
139  }
140  return Count;
141 }
142 
143 /// Find the hardware loop instruction used to set-up the specified loop.
144 /// On Hexagon, we have two instructions used to set-up the hardware loop
145 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
146 /// to indicate the end of a loop.
// Walks the predecessor graph (depth-first, cycle-safe via the visited set)
// looking for the LOOPn instruction that matches the given ENDLOOPn opcode.
// Returns nullptr if the set-up instruction cannot be found.
// NOTE(review): the last parameter line (original line 149, the visited-set
// reference) is missing from this dump.
147 static MachineInstr *findLoopInstr(MachineBasicBlock *BB, unsigned EndLoopOp,
148  MachineBasicBlock *TargetBB,
// Select the immediate/register LOOP opcodes that pair with EndLoopOp.
150  unsigned LOOPi;
151  unsigned LOOPr;
152  if (EndLoopOp == Hexagon::ENDLOOP0) {
153  LOOPi = Hexagon::J2_loop0i;
154  LOOPr = Hexagon::J2_loop0r;
155  } else { // EndLoopOp == Hexagon::EndLOOP1
156  LOOPi = Hexagon::J2_loop1i;
157  LOOPr = Hexagon::J2_loop1r;
158  }
159 
160  // The loop set-up instruction will be in a predecessor block
161  for (MachineBasicBlock *PB : BB->predecessors()) {
162  // If this has been visited, already skip it.
163  if (!Visited.insert(PB).second)
164  continue;
165  if (PB == BB)
166  continue;
// Scan the predecessor bottom-up: the LOOP set-up, if present, is the
// nearest relevant instruction to the loop entry.
167  for (auto I = PB->instr_rbegin(), E = PB->instr_rend(); I != E; ++I) {
168  unsigned Opc = I->getOpcode();
169  if (Opc == LOOPi || Opc == LOOPr)
170  return &*I;
171  // We've reached a different loop, which means the loop01 has been
172  // removed.
173  if (Opc == EndLoopOp && I->getOperand(0).getMBB() != TargetBB)
174  return nullptr;
175  }
176  // Check the predecessors for the LOOP instruction.
177  if (MachineInstr *Loop = findLoopInstr(PB, EndLoopOp, TargetBB, Visited))
178  return Loop;
179  }
180  return nullptr;
181 }
182 
183 /// Gather register def/uses from MI.
184 /// This treats possible (predicated) defs as actually happening ones
185 /// (conservatively).
// NOTE(review): the out-parameter line (original line 187, the Defs/Uses
// vector references) is missing from this dump. Both vectors are cleared
// before being filled.
186 static inline void parseOperands(const MachineInstr &MI,
188  Defs.clear();
189  Uses.clear();
190 
191  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
192  const MachineOperand &MO = MI.getOperand(i);
193 
// Only register operands matter; immediates, MBBs, etc. are skipped.
194  if (!MO.isReg())
195  continue;
196 
// Register number 0 denotes "no register".
197  unsigned Reg = MO.getReg();
198  if (!Reg)
199  continue;
200 
// An operand can be both a use and a def (e.g. tied operands); record
// it in both lists in that case.
201  if (MO.isUse())
202  Uses.push_back(MO.getReg());
203 
204  if (MO.isDef())
205  Defs.push_back(MO.getReg());
206  }
207 }
208 
209 // Position dependent, so check twice for swap.
// Returns true when sub-instruction groups Ga and Gb can legally share a
// duplex, for this operand order only; callers must re-check with the
// arguments swapped. NOTE(review): two case-label lines are missing from
// this dump (original lines 212 and 229 - the latter presumably
// "case HexagonII::HSIG_Compound:", which the orphaned return below
// belongs to; confirm against the original source).
210 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
211  switch (Ga) {
213  default:
214  return false;
215  case HexagonII::HSIG_L1:
216  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
217  case HexagonII::HSIG_L2:
218  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
219  Gb == HexagonII::HSIG_A);
220  case HexagonII::HSIG_S1:
221  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
222  Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
223  case HexagonII::HSIG_S2:
224  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
225  Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
226  Gb == HexagonII::HSIG_A);
227  case HexagonII::HSIG_A:
228  return (Gb == HexagonII::HSIG_A);
230  return (Gb == HexagonII::HSIG_Compound);
231  }
232  return false;
233 }
234 
235 /// isLoadFromStackSlot - If the specified machine instruction is a direct
236 /// load from a stack slot, return the virtual or physical register number of
237 /// the destination along with the FrameIndex of the loaded stack slot. If
238 /// not, return 0. This predicate must return 0 if the instruction has
239 /// any side effects other than loading from the stack slot.
241  int &FrameIndex) const {
242  switch (MI.getOpcode()) {
243  default:
244  break;
245  case Hexagon::L2_loadri_io:
246  case Hexagon::L2_loadrd_io:
247  case Hexagon::V6_vL32b_ai:
248  case Hexagon::V6_vL32b_nt_ai:
249  case Hexagon::V6_vL32Ub_ai:
250  case Hexagon::LDriw_pred:
251  case Hexagon::LDriw_mod:
252  case Hexagon::PS_vloadrq_ai:
253  case Hexagon::PS_vloadrw_ai:
254  case Hexagon::PS_vloadrw_nt_ai: {
255  const MachineOperand OpFI = MI.getOperand(1);
256  if (!OpFI.isFI())
257  return 0;
258  const MachineOperand OpOff = MI.getOperand(2);
259  if (!OpOff.isImm() || OpOff.getImm() != 0)
260  return 0;
261  FrameIndex = OpFI.getIndex();
262  return MI.getOperand(0).getReg();
263  }
264 
265  case Hexagon::L2_ploadrit_io:
266  case Hexagon::L2_ploadrif_io:
267  case Hexagon::L2_ploadrdt_io:
268  case Hexagon::L2_ploadrdf_io: {
269  const MachineOperand OpFI = MI.getOperand(2);
270  if (!OpFI.isFI())
271  return 0;
272  const MachineOperand OpOff = MI.getOperand(3);
273  if (!OpOff.isImm() || OpOff.getImm() != 0)
274  return 0;
275  FrameIndex = OpFI.getIndex();
276  return MI.getOperand(0).getReg();
277  }
278  }
279 
280  return 0;
281 }
282 
283 /// isStoreToStackSlot - If the specified machine instruction is a direct
284 /// store to a stack slot, return the virtual or physical register number of
285 /// the source reg along with the FrameIndex of the loaded stack slot. If
286 /// not, return 0. This predicate must return 0 if the instruction has
287 /// any side effects other than storing to the stack slot.
// NOTE(review): the signature line (original line 288,
// "unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,")
// is missing from this dump.
290  switch (MI.getOpcode()) {
291  default:
292  break;
293  case Hexagon::S2_storerb_io:
294  case Hexagon::S2_storerh_io:
295  case Hexagon::S2_storeri_io:
296  case Hexagon::S2_storerd_io:
297  case Hexagon::V6_vS32b_ai:
298  case Hexagon::V6_vS32Ub_ai:
299  case Hexagon::STriw_pred:
300  case Hexagon::STriw_mod:
301  case Hexagon::PS_vstorerq_ai:
302  case Hexagon::PS_vstorerw_ai: {
// Unpredicated stores: operands are (frame-index, offset, src).
303  const MachineOperand &OpFI = MI.getOperand(0);
304  if (!OpFI.isFI())
305  return 0;
306  const MachineOperand &OpOff = MI.getOperand(1);
// Only a zero offset addresses the slot itself.
307  if (!OpOff.isImm() || OpOff.getImm() != 0)
308  return 0;
309  FrameIndex = OpFI.getIndex();
310  return MI.getOperand(2).getReg();
311  }
312 
313  case Hexagon::S2_pstorerbt_io:
314  case Hexagon::S2_pstorerbf_io:
315  case Hexagon::S2_pstorerht_io:
316  case Hexagon::S2_pstorerhf_io:
317  case Hexagon::S2_pstorerit_io:
318  case Hexagon::S2_pstorerif_io:
319  case Hexagon::S2_pstorerdt_io:
320  case Hexagon::S2_pstorerdf_io: {
// Predicated stores: operands are (pred, frame-index, offset, src).
321  const MachineOperand &OpFI = MI.getOperand(1);
322  if (!OpFI.isFI())
323  return 0;
324  const MachineOperand &OpOff = MI.getOperand(2);
325  if (!OpOff.isImm() || OpOff.getImm() != 0)
326  return 0;
327  FrameIndex = OpFI.getIndex();
328  return MI.getOperand(3).getReg();
329  }
330  }
331 
332  return 0;
333 }
334 
335 /// This function can analyze one/two way branching only and should (mostly) be
336 /// called by target independent side.
337 /// First entry is always the opcode of the branching instruction, except when
338 /// the Cond vector is supposed to be empty, e.g., when AnalyzeBranch fails, a
339 /// BB with only unconditional jump. Subsequent entries depend upon the opcode,
340 /// e.g. Jump_c p will have
341 /// Cond[0] = Jump_c
342 /// Cond[1] = p
343 /// HW-loop ENDLOOP:
344 /// Cond[0] = ENDLOOP
345 /// Cond[1] = MBB
346 /// New value jump:
347 /// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
348 /// Cond[1] = R
349 /// Cond[2] = Imm
// Returns false on success (TBB/FBB/Cond filled in), true when the block
// cannot be analyzed. NOTE(review): this dump is missing the signature line
// (original line 350), the Cond parameter line (353), and the iterator
// initialization "I = MBB.instr_end()" (360).
351  MachineBasicBlock *&TBB,
352  MachineBasicBlock *&FBB,
354  bool AllowModify) const {
355  TBB = nullptr;
356  FBB = nullptr;
357  Cond.clear();
358 
359  // If the block has no terminators, it just falls into the block after it.
361  if (I == MBB.instr_begin())
362  return false;
363 
364  // A basic block may looks like this:
365  //
366  // [ insn
367  // EH_LABEL
368  // insn
369  // insn
370  // insn
371  // EH_LABEL
372  // insn ]
373  //
374  // It has two succs but does not have a terminator
375  // Don't know how to handle it.
376  do {
377  --I;
378  if (I->isEHLabel())
379  // Don't analyze EH branches.
380  return true;
381  } while (I != MBB.instr_begin());
382 
383  I = MBB.instr_end();
384  --I;
385 
// Skip trailing debug values to find the real last instruction.
386  while (I->isDebugValue()) {
387  if (I == MBB.instr_begin())
388  return false;
389  --I;
390  }
391 
392  bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
393  I->getOperand(0).isMBB();
394  // Delete the J2_jump if it's equivalent to a fall-through.
395  if (AllowModify && JumpToBlock &&
396  MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
397  DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
398  I->eraseFromParent();
399  I = MBB.instr_end();
400  if (I == MBB.instr_begin())
401  return false;
402  --I;
403  }
404  if (!isUnpredicatedTerminator(*I))
405  return false;
406 
407  // Get the last instruction in the block.
408  MachineInstr *LastInst = &*I;
409  MachineInstr *SecondLastInst = nullptr;
410  // Find one more terminator if present.
411  while (true) {
412  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
413  if (!SecondLastInst)
414  SecondLastInst = &*I;
415  else
416  // This is a third branch.
417  return true;
418  }
419  if (I == MBB.instr_begin())
420  break;
421  --I;
422  }
423 
424  int LastOpcode = LastInst->getOpcode();
425  int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
426  // If the branch target is not a basic block, it could be a tail call.
427  // (It is, if the target is a function.)
428  if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
429  return true;
430  if (SecLastOpcode == Hexagon::J2_jump &&
431  !SecondLastInst->getOperand(0).isMBB())
432  return true;
433 
434  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
435  bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
436 
// A conditional jump whose target operand is not an MBB cannot be analyzed.
437  if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
438  return true;
439 
440  // If there is only one terminator instruction, process it.
441  if (LastInst && !SecondLastInst) {
442  if (LastOpcode == Hexagon::J2_jump) {
443  TBB = LastInst->getOperand(0).getMBB();
444  return false;
445  }
446  if (isEndLoopN(LastOpcode)) {
447  TBB = LastInst->getOperand(0).getMBB();
448  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
449  Cond.push_back(LastInst->getOperand(0));
450  return false;
451  }
452  if (LastOpcodeHasJMP_c) {
453  TBB = LastInst->getOperand(1).getMBB();
454  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
455  Cond.push_back(LastInst->getOperand(0));
456  return false;
457  }
458  // Only supporting rr/ri versions of new-value jumps.
459  if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
460  TBB = LastInst->getOperand(2).getMBB();
461  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
462  Cond.push_back(LastInst->getOperand(0));
463  Cond.push_back(LastInst->getOperand(1));
464  return false;
465  }
466  DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
467  << " with one jump\n";);
468  // Otherwise, don't know what this is.
469  return true;
470  }
471 
// Two-terminator cases: conditional + unconditional, NV-jump +
// unconditional, jump + jump, or ENDLOOP + jump.
472  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
473  bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
474  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
475  if (!SecondLastInst->getOperand(1).isMBB())
476  return true;
477  TBB = SecondLastInst->getOperand(1).getMBB();
478  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
479  Cond.push_back(SecondLastInst->getOperand(0));
480  FBB = LastInst->getOperand(0).getMBB();
481  return false;
482  }
483 
484  // Only supporting rr/ri versions of new-value jumps.
485  if (SecLastOpcodeHasNVJump &&
486  (SecondLastInst->getNumExplicitOperands() == 3) &&
487  (LastOpcode == Hexagon::J2_jump)) {
488  TBB = SecondLastInst->getOperand(2).getMBB();
489  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
490  Cond.push_back(SecondLastInst->getOperand(0));
491  Cond.push_back(SecondLastInst->getOperand(1));
492  FBB = LastInst->getOperand(0).getMBB();
493  return false;
494  }
495 
496  // If the block ends with two Hexagon:JMPs, handle it. The second one is not
497  // executed, so remove it.
498  if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
499  TBB = SecondLastInst->getOperand(0).getMBB();
500  I = LastInst->getIterator();
501  if (AllowModify)
502  I->eraseFromParent();
503  return false;
504  }
505 
506  // If the block ends with an ENDLOOP, and J2_jump, handle it.
507  if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
508  TBB = SecondLastInst->getOperand(0).getMBB();
509  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
510  Cond.push_back(SecondLastInst->getOperand(0));
511  FBB = LastInst->getOperand(0).getMBB();
512  return false;
513  }
514  DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
515  << " with two jumps";);
516  // Otherwise, can't handle this.
517  return true;
518 }
519 
// Remove all branch instructions from the end of MBB and return how many
// were erased. NOTE(review): this dump is missing the signature line
// (original line 520, "unsigned HexagonInstrInfo::removeBranch(
// MachineBasicBlock &MBB,") and the iterator initialization at original
// line 525.
521  int *BytesRemoved) const {
// Byte accounting is not implemented for Hexagon; callers must not ask.
522  assert(!BytesRemoved && "code size not handled");
523 
524  DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber());
526  unsigned Count = 0;
527  while (I != MBB.begin()) {
528  --I;
529  if (I->isDebugValue())
530  continue;
531  // Only removing branches from end of MBB.
532  if (!I->isBranch())
533  return Count;
534  if (Count && (I->getOpcode() == Hexagon::J2_jump))
535  llvm_unreachable("Malformed basic block: unconditional branch not last");
// Erase from the back and restart the scan, since erasing invalidates I.
536  MBB.erase(&MBB.back());
537  I = MBB.end();
538  ++Count;
539  }
540  return Count;
541 }
542 
// Insert branch code at the end of MBB for the (TBB, FBB, Cond) triple
// produced by analyzeBranch; returns the number of instructions added
// (1 or 2). NOTE(review): this dump is missing the signature line (original
// line 543), the Cond parameter line (546), and the local declarations at
// original lines 568, 573 and 583 (including the VisitedBBs set passed to
// findLoopInstr).
544  MachineBasicBlock *TBB,
545  MachineBasicBlock *FBB,
547  const DebugLoc &DL,
548  int *BytesAdded) const {
549  unsigned BOpc = Hexagon::J2_jump;
550  unsigned BccOpc = Hexagon::J2_jumpt;
551  assert(validateBranchCond(Cond) && "Invalid branching condition");
552  assert(TBB && "insertBranch must not be told to insert a fallthrough");
553  assert(!BytesAdded && "code size not handled");
554 
555  // Check if reverseBranchCondition has asked to reverse this branch
556  // If we want to reverse the branch an odd number of times, we want
557  // J2_jumpf.
558  if (!Cond.empty() && Cond[0].isImm())
559  BccOpc = Cond[0].getImm();
560 
561  if (!FBB) {
562  if (Cond.empty()) {
563  // Due to a bug in TailMerging/CFG Optimization, we need to add a
564  // special case handling of a predicated jump followed by an
565  // unconditional jump. If not, Tail Merging and CFG Optimization go
566  // into an infinite loop.
567  MachineBasicBlock *NewTBB, *NewFBB;
569  auto Term = MBB.getFirstTerminator();
570  if (Term != MBB.end() && isPredicated(*Term) &&
571  !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
572  MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
574  removeBranch(MBB);
575  return insertBranch(MBB, TBB, nullptr, Cond, DL);
576  }
577  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
578  } else if (isEndLoopN(Cond[0].getImm())) {
579  int EndLoopOp = Cond[0].getImm();
580  assert(Cond[1].isMBB());
581  // Since we're adding an ENDLOOP, there better be a LOOP instruction.
582  // Check for it, and change the BB target if needed.
584  MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
585  VisitedBBs);
586  assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
587  Loop->getOperand(0).setMBB(TBB);
588  // Add the ENDLOOP after the finding the LOOP0.
589  BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
590  } else if (isNewValueJump(Cond[0].getImm())) {
591  assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
592  // New value jump
593  // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
594  // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
595  unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
596  DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber(););
597  if (Cond[2].isReg()) {
598  unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
599  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
600  addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
601  } else if(Cond[2].isImm()) {
602  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
603  addImm(Cond[2].getImm()).addMBB(TBB);
604  } else
605  llvm_unreachable("Invalid condition for branching");
606  } else {
// Ordinary predicated jump: Cond = (opcode, predicate register).
607  assert((Cond.size() == 2) && "Malformed cond vector");
608  const MachineOperand &RO = Cond[1];
609  unsigned Flags = getUndefRegState(RO.isUndef());
610  BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
611  }
612  return 1;
613  }
// Two-way branch: conditional to TBB, then unconditional to FBB.
614  assert((!Cond.empty()) &&
615  "Cond. cannot be empty when multiple branchings are required");
616  assert((!isNewValueJump(Cond[0].getImm())) &&
617  "NV-jump cannot be inserted with another branch");
618  // Special case for hardware loops. The condition is a basic block.
619  if (isEndLoopN(Cond[0].getImm())) {
620  int EndLoopOp = Cond[0].getImm();
621  assert(Cond[1].isMBB());
622  // Since we're adding an ENDLOOP, there better be a LOOP instruction.
623  // Check for it, and change the BB target if needed.
625  MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
626  VisitedBBs);
627  assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
628  Loop->getOperand(0).setMBB(TBB);
629  // Add the ENDLOOP after the finding the LOOP0.
630  BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
631  } else {
632  const MachineOperand &RO = Cond[1];
633  unsigned Flags = getUndefRegState(RO.isUndef());
634  BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
635  }
636  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
637 
638  return 2;
639 }
640 
641 /// Analyze the loop code to find the loop induction variable and compare used
642 /// to compute the number of iterations. Currently, we analyze loop that are
643 /// controlled using hardware loops. In this case, the induction variable
644 /// instruction is null. For all other cases, this function returns true, which
645 /// means we're unable to analyze it.
// NOTE(review): this dump is missing the signature line (original line 646,
// "bool HexagonInstrInfo::analyzeLoop(MachineLoop &L,") and the iterator
// initialization at original line 651.
647  MachineInstr *&IndVarInst,
648  MachineInstr *&CmpInst) const {
649 
650  MachineBasicBlock *LoopEnd = L.getBottomBlock();
652  // We really "analyze" only hardware loops right now.
653  if (I != LoopEnd->end() && isEndLoopN(I->getOpcode())) {
// Hardware loop: no explicit induction variable; ENDLOOP acts as the
// compare/branch.
654  IndVarInst = nullptr;
655  CmpInst = &*I;
656  return false;
657  }
658  return true;
659 }
660 
661 /// Generate code to reduce the loop iteration by one and check if the loop is
662 /// finished. Return the value/register of the new loop count. this function
663 /// assumes the nth iteration is peeled first.
// NOTE(review): this dump is missing the signature line (original line 664),
// the Cond/PrevInsts parameter lines (666-667), the VisitedBBs declaration
// (675), and the loop header over PrevInsts (704).
665  MachineInstr *IndVar, MachineInstr &Cmp,
668  unsigned Iter, unsigned MaxIter) const {
669  // We expect a hardware loop currently. This means that IndVar is set
670  // to null, and the compare is the ENDLOOP instruction.
671  assert((!IndVar) && isEndLoopN(Cmp.getOpcode())
672  && "Expecting a hardware loop");
673  MachineFunction *MF = MBB.getParent();
674  DebugLoc DL = Cmp.getDebugLoc();
676  MachineInstr *Loop = findLoopInstr(&MBB, Cmp.getOpcode(),
677  Cmp.getOperand(0).getMBB(), VisitedBBs);
678  if (!Loop)
679  return 0;
680  // If the loop trip count is a compile-time value, then just change the
681  // value.
682  if (Loop->getOpcode() == Hexagon::J2_loop0i ||
683  Loop->getOpcode() == Hexagon::J2_loop1i) {
684  int64_t Offset = Loop->getOperand(1).getImm();
// A trip count of one (or fewer) means no iterations remain after the
// decrement, so the LOOP set-up can be deleted outright.
685  if (Offset <= 1)
686  Loop->eraseFromParent();
687  else
688  Loop->getOperand(1).setImm(Offset - 1);
689  return Offset - 1;
690  }
691  // The loop trip count is a run-time value. We generate code to subtract
692  // one from the trip count, and update the loop instruction.
693  assert(Loop->getOpcode() == Hexagon::J2_loop0r && "Unexpected instruction");
694  unsigned LoopCount = Loop->getOperand(1).getReg();
695  // Check if we're done with the loop.
696  unsigned LoopEnd = createVR(MF, MVT::i1);
697  MachineInstr *NewCmp = BuildMI(&MBB, DL, get(Hexagon::C2_cmpgtui), LoopEnd).
698  addReg(LoopCount).addImm(1);
699  unsigned NewLoopCount = createVR(MF, MVT::i32);
700  MachineInstr *NewAdd = BuildMI(&MBB, DL, get(Hexagon::A2_addi), NewLoopCount).
701  addReg(LoopCount).addImm(-1);
702  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
703  // Update the previously generated instructions with the new loop counter.
705  E = PrevInsts.end(); I != E; ++I)
706  (*I)->substituteRegister(LoopCount, NewLoopCount, 0, HRI);
707  PrevInsts.clear();
708  PrevInsts.push_back(NewCmp);
709  PrevInsts.push_back(NewAdd);
710  // Insert the new loop instruction if this is the last time the loop is
711  // decremented.
712  if (Iter == MaxIter)
713  BuildMI(&MBB, DL, get(Hexagon::J2_loop0r)).
714  addMBB(Loop->getOperand(0).getMBB()).addReg(NewLoopCount);
715  // Delete the old loop instruction.
716  if (Iter == 0)
717  Loop->eraseFromParent();
718  Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
719  Cond.push_back(NewCmp->getOperand(0));
720  return NewLoopCount;
721 }
722 
// Heuristic for single-block if-conversion: convert only tiny blocks
// (at most 3 non-debug instructions), ignoring cycle counts and probability.
// NOTE(review): the signature line (original line 723) is missing from this
// dump.
724  unsigned NumCycles, unsigned ExtraPredCycles,
725  BranchProbability Probability) const {
726  return nonDbgBBSize(&MBB) <= 3;
727 }
728 
// Diamond (true/false block) variant of the same heuristic: both sides must
// be tiny (at most 3 non-debug instructions each). NOTE(review): the
// signature line (original line 729) is missing from this dump.
730  unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
731  unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
732  const {
733  return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
734 }
735 
// Allow duplicating a block for if-conversion only when it is small
// (at most 4 instructions). NOTE(review): the signature line (original
// line 736) is missing from this dump.
737  unsigned NumInstrs, BranchProbability Probability) const {
738  return NumInstrs <= 4;
739 }
740 
743  const DebugLoc &DL, unsigned DestReg,
744  unsigned SrcReg, bool KillSrc) const {
745  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
746  unsigned KillFlag = getKillRegState(KillSrc);
747 
748  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
749  BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
750  .addReg(SrcReg, KillFlag);
751  return;
752  }
753  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
754  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
755  .addReg(SrcReg, KillFlag);
756  return;
757  }
758  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
759  // Map Pd = Ps to Pd = or(Ps, Ps).
760  BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
761  .addReg(SrcReg).addReg(SrcReg, KillFlag);
762  return;
763  }
764  if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
765  Hexagon::IntRegsRegClass.contains(SrcReg)) {
766  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
767  .addReg(SrcReg, KillFlag);
768  return;
769  }
770  if (Hexagon::IntRegsRegClass.contains(DestReg) &&
771  Hexagon::CtrRegsRegClass.contains(SrcReg)) {
772  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
773  .addReg(SrcReg, KillFlag);
774  return;
775  }
776  if (Hexagon::ModRegsRegClass.contains(DestReg) &&
777  Hexagon::IntRegsRegClass.contains(SrcReg)) {
778  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
779  .addReg(SrcReg, KillFlag);
780  return;
781  }
782  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
783  Hexagon::IntRegsRegClass.contains(DestReg)) {
784  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
785  .addReg(SrcReg, KillFlag);
786  return;
787  }
788  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
789  Hexagon::PredRegsRegClass.contains(DestReg)) {
790  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
791  .addReg(SrcReg, KillFlag);
792  return;
793  }
794  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
795  Hexagon::IntRegsRegClass.contains(DestReg)) {
796  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
797  .addReg(SrcReg, KillFlag);
798  return;
799  }
800  if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
801  BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
802  addReg(SrcReg, KillFlag);
803  return;
804  }
805  if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
806  unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
807  unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
808  BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
809  .addReg(HiSrc, KillFlag)
810  .addReg(LoSrc, KillFlag);
811  return;
812  }
813  if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
814  BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
815  .addReg(SrcReg)
816  .addReg(SrcReg, KillFlag);
817  return;
818  }
819  if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
820  Hexagon::HvxVRRegClass.contains(DestReg)) {
821  llvm_unreachable("Unimplemented pred to vec");
822  return;
823  }
824  if (Hexagon::HvxQRRegClass.contains(DestReg) &&
825  Hexagon::HvxVRRegClass.contains(SrcReg)) {
826  llvm_unreachable("Unimplemented vec to pred");
827  return;
828  }
829 
830 #ifndef NDEBUG
831  // Show the invalid registers to ease debugging.
832  dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber()
833  << ": " << PrintReg(DestReg, &HRI)
834  << " = " << PrintReg(SrcReg, &HRI) << '\n';
835 #endif
836  llvm_unreachable("Unimplemented");
837 }
838 
// Spill SrcReg of class RC to frame slot FI, choosing the store opcode by
// register class; HVX classes fall back to unaligned stores when the slot
// is less aligned than the register spill alignment. NOTE(review): this
// dump is missing the signature line (original line 839) and the
// MachineMemOperand creation lines (originals 850-852, 880-882, 892-894).
840  MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI,
841  const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
842  DebugLoc DL = MBB.findDebugLoc(I);
843  MachineFunction &MF = *MBB.getParent();
844  MachineFrameInfo &MFI = MF.getFrameInfo();
845  unsigned SlotAlign = MFI.getObjectAlignment(FI);
846  unsigned RegAlign = TRI->getSpillAlignment(*RC);
847  unsigned KillFlag = getKillRegState(isKill);
848  bool HasAlloca = MFI.hasVarSizedObjects();
849  const HexagonFrameLowering &HFI = *Subtarget.getFrameLowering();
850 
853  MFI.getObjectSize(FI), SlotAlign);
854 
855  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
856  BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
857  .addFrameIndex(FI).addImm(0)
858  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
859  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
860  BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
861  .addFrameIndex(FI).addImm(0)
862  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
863  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
864  BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
865  .addFrameIndex(FI).addImm(0)
866  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
867  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
868  BuildMI(MBB, I, DL, get(Hexagon::STriw_mod))
869  .addFrameIndex(FI).addImm(0)
870  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
871  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
872  BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
873  .addFrameIndex(FI).addImm(0)
874  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
875  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
876  // If there are variable-sized objects, spills will not be aligned.
877  if (HasAlloca)
878  SlotAlign = HFI.getStackAlignment();
879  unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vS32Ub_ai
880  : Hexagon::V6_vS32b_ai;
883  MFI.getObjectSize(FI), SlotAlign);
884  BuildMI(MBB, I, DL, get(Opc))
885  .addFrameIndex(FI).addImm(0)
886  .addReg(SrcReg, KillFlag).addMemOperand(MMOA);
887  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
888  // If there are variable-sized objects, spills will not be aligned.
889  if (HasAlloca)
890  SlotAlign = HFI.getStackAlignment();
891  unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vstorerwu_ai
892  : Hexagon::PS_vstorerw_ai;
895  MFI.getObjectSize(FI), SlotAlign);
896  BuildMI(MBB, I, DL, get(Opc))
897  .addFrameIndex(FI).addImm(0)
898  .addReg(SrcReg, KillFlag).addMemOperand(MMOA);
899  } else {
900  llvm_unreachable("Unimplemented");
901  }
902 }
903 
// Reload DestReg of class RC from frame slot FI; mirror of
// storeRegToStackSlot, with the same HVX aligned/unaligned opcode selection.
// NOTE(review): this dump is missing the signature line (original line 904)
// and the MachineMemOperand creation lines (originals 916-917, 941-942,
// 952-953).
905  MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg,
906  int FI, const TargetRegisterClass *RC,
907  const TargetRegisterInfo *TRI) const {
908  DebugLoc DL = MBB.findDebugLoc(I);
909  MachineFunction &MF = *MBB.getParent();
910  MachineFrameInfo &MFI = MF.getFrameInfo();
911  unsigned SlotAlign = MFI.getObjectAlignment(FI);
912  unsigned RegAlign = TRI->getSpillAlignment(*RC);
913  bool HasAlloca = MFI.hasVarSizedObjects();
914  const HexagonFrameLowering &HFI = *Subtarget.getFrameLowering();
915 
918  MFI.getObjectSize(FI), SlotAlign);
919 
920  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
921  BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
922  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
923  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
924  BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
925  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
926  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
927  BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
928  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
929  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
930  BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg)
931  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
932  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
933  BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
934  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
935  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
936  // If there are variable-sized objects, spills will not be aligned.
937  if (HasAlloca)
938  SlotAlign = HFI.getStackAlignment();
939  unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vL32Ub_ai
940  : Hexagon::V6_vL32b_ai;
943  MFI.getObjectSize(FI), SlotAlign);
944  BuildMI(MBB, I, DL, get(Opc), DestReg)
945  .addFrameIndex(FI).addImm(0).addMemOperand(MMOA);
946  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
947  // If there are variable-sized objects, spills will not be aligned.
948  if (HasAlloca)
949  SlotAlign = HFI.getStackAlignment();
950  unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vloadrwu_ai
951  : Hexagon::PS_vloadrw_ai;
954  MFI.getObjectSize(FI), SlotAlign);
955  BuildMI(MBB, I, DL, get(Opc), DestReg)
956  .addFrameIndex(FI).addImm(0).addMemOperand(MMOA);
957  } else {
// NOTE(review): this message says "store" in a load routine; it looks
// copy-pasted from storeRegToStackSlot - confirm and fix upstream.
958  llvm_unreachable("Can't store this register to stack slot");
959  }
960 }
961 
// Compute into Regs the set of physical registers live immediately before
// MI: seed with the block's live-outs, then step backwards over every
// instruction from the end of the block down to MI.
963  const MachineBasicBlock &B = *MI.getParent();
964  Regs.addLiveOuts(B);
  // E is the reverse iterator one past MI, so the loop visits
  // [last instruction .. MI] in reverse program order.
965  auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
966  for (auto I = B.rbegin(); I != E; ++I)
967  Regs.stepBackward(*I);
968 }
969 
970 /// expandPostRAPseudo - This function is called for all pseudo instructions
971 /// that remain after register allocation. Many pseudo instructions are
972 /// created to help register allocation. This is the place to convert them
973 /// into real instructions. The target can edit MI in place, or it can insert
974 /// new instructions and erase MI. The function should return true if
975 /// anything was changed.
977  MachineBasicBlock &MBB = *MI.getParent();
978  MachineFunction &MF = *MBB.getParent();
980  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
981  DebugLoc DL = MI.getDebugLoc();
982  unsigned Opc = MI.getOpcode();
983 
984  switch (Opc) {
985  case TargetOpcode::COPY: {
986  MachineOperand &MD = MI.getOperand(0);
987  MachineOperand &MS = MI.getOperand(1);
989  if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
990  copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
991  std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
992  }
993  MBB.erase(MBBI);
994  return true;
995  }
996  case Hexagon::PS_aligna:
997  BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
998  .addReg(HRI.getFrameRegister())
999  .addImm(-MI.getOperand(1).getImm());
1000  MBB.erase(MI);
1001  return true;
1002  case Hexagon::V6_vassignp: {
1003  unsigned SrcReg = MI.getOperand(1).getReg();
1004  unsigned DstReg = MI.getOperand(0).getReg();
1005  unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1006  BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1007  .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi), Kill)
1008  .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo), Kill);
1009  MBB.erase(MI);
1010  return true;
1011  }
1012  case Hexagon::V6_lo: {
1013  unsigned SrcReg = MI.getOperand(1).getReg();
1014  unsigned DstReg = MI.getOperand(0).getReg();
1015  unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1016  copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1017  MBB.erase(MI);
1018  MRI.clearKillFlags(SrcSubLo);
1019  return true;
1020  }
1021  case Hexagon::V6_hi: {
1022  unsigned SrcReg = MI.getOperand(1).getReg();
1023  unsigned DstReg = MI.getOperand(0).getReg();
1024  unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1025  copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1026  MBB.erase(MI);
1027  MRI.clearKillFlags(SrcSubHi);
1028  return true;
1029  }
1030  case Hexagon::PS_vstorerw_ai:
1031  case Hexagon::PS_vstorerwu_ai: {
1032  bool Aligned = Opc == Hexagon::PS_vstorerw_ai;
1033  unsigned SrcReg = MI.getOperand(2).getReg();
1034  unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1035  unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1036  unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
1037  unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1038 
1039  MachineInstr *MI1New =
1040  BuildMI(MBB, MI, DL, get(NewOpc))
1041  .add(MI.getOperand(0))
1042  .addImm(MI.getOperand(1).getImm())
1043  .addReg(SrcSubLo)
1045  MI1New->getOperand(0).setIsKill(false);
1046  BuildMI(MBB, MI, DL, get(NewOpc))
1047  .add(MI.getOperand(0))
1048  // The Vectors are indexed in multiples of vector size.
1049  .addImm(MI.getOperand(1).getImm() + Offset)
1050  .addReg(SrcSubHi)
1052  MBB.erase(MI);
1053  return true;
1054  }
1055  case Hexagon::PS_vloadrw_ai:
1056  case Hexagon::PS_vloadrwu_ai: {
1057  bool Aligned = Opc == Hexagon::PS_vloadrw_ai;
1058  unsigned DstReg = MI.getOperand(0).getReg();
1059  unsigned NewOpc = Aligned ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai;
1060  unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1061 
1062  MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc),
1063  HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1064  .add(MI.getOperand(1))
1065  .addImm(MI.getOperand(2).getImm())
1066  .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1067  MI1New->getOperand(1).setIsKill(false);
1068  BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1069  .add(MI.getOperand(1))
1070  // The Vectors are indexed in multiples of vector size.
1071  .addImm(MI.getOperand(2).getImm() + Offset)
1072  .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1073  MBB.erase(MI);
1074  return true;
1075  }
1076  case Hexagon::PS_true: {
1077  unsigned Reg = MI.getOperand(0).getReg();
1078  BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1079  .addReg(Reg, RegState::Undef)
1080  .addReg(Reg, RegState::Undef);
1081  MBB.erase(MI);
1082  return true;
1083  }
1084  case Hexagon::PS_false: {
1085  unsigned Reg = MI.getOperand(0).getReg();
1086  BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1087  .addReg(Reg, RegState::Undef)
1088  .addReg(Reg, RegState::Undef);
1089  MBB.erase(MI);
1090  return true;
1091  }
1092  case Hexagon::PS_vmulw: {
1093  // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1094  unsigned DstReg = MI.getOperand(0).getReg();
1095  unsigned Src1Reg = MI.getOperand(1).getReg();
1096  unsigned Src2Reg = MI.getOperand(2).getReg();
1097  unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1098  unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1099  unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1100  unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1101  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1102  HRI.getSubReg(DstReg, Hexagon::isub_hi))
1103  .addReg(Src1SubHi)
1104  .addReg(Src2SubHi);
1105  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1106  HRI.getSubReg(DstReg, Hexagon::isub_lo))
1107  .addReg(Src1SubLo)
1108  .addReg(Src2SubLo);
1109  MBB.erase(MI);
1110  MRI.clearKillFlags(Src1SubHi);
1111  MRI.clearKillFlags(Src1SubLo);
1112  MRI.clearKillFlags(Src2SubHi);
1113  MRI.clearKillFlags(Src2SubLo);
1114  return true;
1115  }
1116  case Hexagon::PS_vmulw_acc: {
1117  // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1118  unsigned DstReg = MI.getOperand(0).getReg();
1119  unsigned Src1Reg = MI.getOperand(1).getReg();
1120  unsigned Src2Reg = MI.getOperand(2).getReg();
1121  unsigned Src3Reg = MI.getOperand(3).getReg();
1122  unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1123  unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1124  unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1125  unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1126  unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1127  unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1128  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1129  HRI.getSubReg(DstReg, Hexagon::isub_hi))
1130  .addReg(Src1SubHi)
1131  .addReg(Src2SubHi)
1132  .addReg(Src3SubHi);
1133  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1134  HRI.getSubReg(DstReg, Hexagon::isub_lo))
1135  .addReg(Src1SubLo)
1136  .addReg(Src2SubLo)
1137  .addReg(Src3SubLo);
1138  MBB.erase(MI);
1139  MRI.clearKillFlags(Src1SubHi);
1140  MRI.clearKillFlags(Src1SubLo);
1141  MRI.clearKillFlags(Src2SubHi);
1142  MRI.clearKillFlags(Src2SubLo);
1143  MRI.clearKillFlags(Src3SubHi);
1144  MRI.clearKillFlags(Src3SubLo);
1145  return true;
1146  }
1147  case Hexagon::PS_pselect: {
1148  const MachineOperand &Op0 = MI.getOperand(0);
1149  const MachineOperand &Op1 = MI.getOperand(1);
1150  const MachineOperand &Op2 = MI.getOperand(2);
1151  const MachineOperand &Op3 = MI.getOperand(3);
1152  unsigned Rd = Op0.getReg();
1153  unsigned Pu = Op1.getReg();
1154  unsigned Rs = Op2.getReg();
1155  unsigned Rt = Op3.getReg();
1156  DebugLoc DL = MI.getDebugLoc();
1157  unsigned K1 = getKillRegState(Op1.isKill());
1158  unsigned K2 = getKillRegState(Op2.isKill());
1159  unsigned K3 = getKillRegState(Op3.isKill());
1160  if (Rd != Rs)
1161  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1162  .addReg(Pu, (Rd == Rt) ? K1 : 0)
1163  .addReg(Rs, K2);
1164  if (Rd != Rt)
1165  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1166  .addReg(Pu, K1)
1167  .addReg(Rt, K3);
1168  MBB.erase(MI);
1169  return true;
1170  }
1171  case Hexagon::PS_vselect: {
1172  const MachineOperand &Op0 = MI.getOperand(0);
1173  const MachineOperand &Op1 = MI.getOperand(1);
1174  const MachineOperand &Op2 = MI.getOperand(2);
1175  const MachineOperand &Op3 = MI.getOperand(3);
1176  LivePhysRegs LiveAtMI(HRI);
1177  getLiveRegsAt(LiveAtMI, MI);
1178  bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg());
1179  unsigned PReg = Op1.getReg();
1180  assert(Op1.getSubReg() == 0);
1181  unsigned PState = getRegState(Op1);
1182 
1183  if (Op0.getReg() != Op2.getReg()) {
1184  unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1185  : PState;
1186  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1187  .add(Op0)
1188  .addReg(PReg, S)
1189  .add(Op2);
1190  if (IsDestLive)
1191  T.addReg(Op0.getReg(), RegState::Implicit);
1192  IsDestLive = true;
1193  }
1194  if (Op0.getReg() != Op3.getReg()) {
1195  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1196  .add(Op0)
1197  .addReg(PReg, PState)
1198  .add(Op3);
1199  if (IsDestLive)
1200  T.addReg(Op0.getReg(), RegState::Implicit);
1201  }
1202  MBB.erase(MI);
1203  return true;
1204  }
1205  case Hexagon::PS_wselect: {
1206  MachineOperand &Op0 = MI.getOperand(0);
1207  MachineOperand &Op1 = MI.getOperand(1);
1208  MachineOperand &Op2 = MI.getOperand(2);
1209  MachineOperand &Op3 = MI.getOperand(3);
1210  LivePhysRegs LiveAtMI(HRI);
1211  getLiveRegsAt(LiveAtMI, MI);
1212  bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg());
1213  unsigned PReg = Op1.getReg();
1214  assert(Op1.getSubReg() == 0);
1215  unsigned PState = getRegState(Op1);
1216 
1217  if (Op0.getReg() != Op2.getReg()) {
1218  unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1219  : PState;
1220  unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1221  unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1222  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1223  .add(Op0)
1224  .addReg(PReg, S)
1225  .add(Op1)
1226  .addReg(SrcHi)
1227  .addReg(SrcLo);
1228  if (IsDestLive)
1229  T.addReg(Op0.getReg(), RegState::Implicit);
1230  IsDestLive = true;
1231  }
1232  if (Op0.getReg() != Op3.getReg()) {
1233  unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1234  unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1235  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1236  .add(Op0)
1237  .addReg(PReg, PState)
1238  .addReg(SrcHi)
1239  .addReg(SrcLo);
1240  if (IsDestLive)
1241  T.addReg(Op0.getReg(), RegState::Implicit);
1242  }
1243  MBB.erase(MI);
1244  return true;
1245  }
1246  case Hexagon::PS_tailcall_i:
1247  MI.setDesc(get(Hexagon::J2_jump));
1248  return true;
1249  case Hexagon::PS_tailcall_r:
1250  case Hexagon::PS_jmpret:
1251  MI.setDesc(get(Hexagon::J2_jumpr));
1252  return true;
1253  case Hexagon::PS_jmprett:
1254  MI.setDesc(get(Hexagon::J2_jumprt));
1255  return true;
1256  case Hexagon::PS_jmpretf:
1257  MI.setDesc(get(Hexagon::J2_jumprf));
1258  return true;
1259  case Hexagon::PS_jmprettnewpt:
1260  MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1261  return true;
1262  case Hexagon::PS_jmpretfnewpt:
1263  MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1264  return true;
1265  case Hexagon::PS_jmprettnew:
1266  MI.setDesc(get(Hexagon::J2_jumprtnew));
1267  return true;
1268  case Hexagon::PS_jmpretfnew:
1269  MI.setDesc(get(Hexagon::J2_jumprfnew));
1270  return true;
1271  }
1272 
1273  return false;
1274 }
1275 
1276 // We indicate that we want to reverse the branch by
1277 // inserting the reversed branching opcode.
1279  SmallVectorImpl<MachineOperand> &Cond) const {
1280  if (Cond.empty())
1281  return true;
1282  assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1283  unsigned opcode = Cond[0].getImm();
1284  //unsigned temp;
1285  assert(get(opcode).isBranch() && "Should be a branching condition.");
1286  if (isEndLoopN(opcode))
1287  return true;
1288  unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1289  Cond[0].setImm(NewOpcode);
1290  return false;
1291 }
1292 
1295  DebugLoc DL;
1296  BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1297 }
1298 
1300  return getAddrMode(MI) == HexagonII::PostInc;
1301 }
1302 
1303 // Returns true if an instruction is predicated irrespective of the predicate
1304 // sense. For example, all of the following will return true.
1305 // if (p0) R1 = add(R2, R3)
1306 // if (!p0) R1 = add(R2, R3)
1307 // if (p0.new) R1 = add(R2, R3)
1308 // if (!p0.new) R1 = add(R2, R3)
1309 // Note: New-value stores are not included here as in the current
1310 // implementation, we don't need to check their predicate sense.
1312  const uint64_t F = MI.getDesc().TSFlags;
1314 }
1315 
1317  MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1318  if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1319  isEndLoopN(Cond[0].getImm())) {
1320  DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1321  return false;
1322  }
1323  int Opc = MI.getOpcode();
1324  assert (isPredicable(MI) && "Expected predicable instruction");
1325  bool invertJump = predOpcodeHasNot(Cond);
1326 
1327  // We have to predicate MI "in place", i.e. after this function returns,
1328  // MI will need to be transformed into a predicated form. To avoid com-
1329  // plicated manipulations with the operands (handling tied operands,
1330  // etc.), build a new temporary instruction, then overwrite MI with it.
1331 
1332  MachineBasicBlock &B = *MI.getParent();
1333  DebugLoc DL = MI.getDebugLoc();
1334  unsigned PredOpc = getCondOpcode(Opc, invertJump);
1335  MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1336  unsigned NOp = 0, NumOps = MI.getNumOperands();
1337  while (NOp < NumOps) {
1338  MachineOperand &Op = MI.getOperand(NOp);
1339  if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1340  break;
1341  T.add(Op);
1342  NOp++;
1343  }
1344 
1345  unsigned PredReg, PredRegPos, PredRegFlags;
1346  bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1347  (void)GotPredReg;
1348  assert(GotPredReg);
1349  T.addReg(PredReg, PredRegFlags);
1350  while (NOp < NumOps)
1351  T.add(MI.getOperand(NOp++));
1352 
1353  MI.setDesc(get(PredOpc));
1354  while (unsigned n = MI.getNumOperands())
1355  MI.RemoveOperand(n-1);
1356  for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1357  MI.addOperand(T->getOperand(i));
1358 
1360  B.erase(TI);
1361 
1363  MRI.clearKillFlags(PredReg);
1364  return true;
1365 }
1366 
1368  ArrayRef<MachineOperand> Pred2) const {
1369  // TODO: Fix this
1370  return false;
1371 }
1372 
1374  std::vector<MachineOperand> &Pred) const {
1375  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1376 
1377  for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) {
1378  MachineOperand MO = MI.getOperand(oper);
1379  if (MO.isReg()) {
1380  if (!MO.isDef())
1381  continue;
1382  const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1383  if (RC == &Hexagon::PredRegsRegClass) {
1384  Pred.push_back(MO);
1385  return true;
1386  }
1387  continue;
1388  } else if (MO.isRegMask()) {
1389  for (unsigned PR : Hexagon::PredRegsRegClass) {
1390  if (!MI.modifiesRegister(PR, &HRI))
1391  continue;
1392  Pred.push_back(MO);
1393  return true;
1394  }
1395  }
1396  }
1397  return false;
1398 }
1399 
1401  if (!MI.getDesc().isPredicable())
1402  return false;
1403 
1404  if (MI.isCall() || isTailCall(MI)) {
1405  if (!Subtarget.usePredicatedCalls())
1406  return false;
1407  }
1408 
1409  // HVX loads are not predicable on v60, but are on v62.
1410  if (!Subtarget.hasV62TOps()) {
1411  switch (MI.getOpcode()) {
1412  case Hexagon::V6_vL32b_ai:
1413  case Hexagon::V6_vL32b_pi:
1414  case Hexagon::V6_vL32b_ppu:
1415  case Hexagon::V6_vL32b_cur_ai:
1416  case Hexagon::V6_vL32b_cur_pi:
1417  case Hexagon::V6_vL32b_cur_ppu:
1418  case Hexagon::V6_vL32b_nt_ai:
1419  case Hexagon::V6_vL32b_nt_pi:
1420  case Hexagon::V6_vL32b_nt_ppu:
1421  case Hexagon::V6_vL32b_tmp_ai:
1422  case Hexagon::V6_vL32b_tmp_pi:
1423  case Hexagon::V6_vL32b_tmp_ppu:
1424  case Hexagon::V6_vL32b_nt_cur_ai:
1425  case Hexagon::V6_vL32b_nt_cur_pi:
1426  case Hexagon::V6_vL32b_nt_cur_ppu:
1427  case Hexagon::V6_vL32b_nt_tmp_ai:
1428  case Hexagon::V6_vL32b_nt_tmp_pi:
1429  case Hexagon::V6_vL32b_nt_tmp_ppu:
1430  return false;
1431  }
1432  }
1433  return true;
1434 }
1435 
1437  const MachineBasicBlock *MBB,
1438  const MachineFunction &MF) const {
1439  // Debug info is never a scheduling boundary. It's necessary to be explicit
1440  // due to the special treatment of IT instructions below, otherwise a
1441  // dbg_value followed by an IT will result in the IT instruction being
1442  // considered a scheduling hazard, which is wrong. It should be the actual
1443  // instruction preceding the dbg_value instruction(s), just like it is
1444  // when debug info is not present.
1445  if (MI.isDebugValue())
1446  return false;
1447 
1448  // Throwing call is a boundary.
1449  if (MI.isCall()) {
1450  // Don't mess around with no return calls.
1451  if (doesNotReturn(MI))
1452  return true;
1453  // If any of the block's successors is a landing pad, this could be a
1454  // throwing call.
1455  for (auto I : MBB->successors())
1456  if (I->isEHPad())
1457  return true;
1458  }
1459 
1460  // Terminators and labels can't be scheduled around.
1461  if (MI.getDesc().isTerminator() || MI.isPosition())
1462  return true;
1463 
1464  if (MI.isInlineAsm() && !ScheduleInlineAsm)
1465  return true;
1466 
1467  return false;
1468 }
1469 
1470 /// Measure the specified inline asm to determine an approximation of its
1471 /// length.
1472 /// Comments (which run till the next SeparatorString or newline) do not
1473 /// count as an instruction.
1474 /// Any other non-whitespace text is considered an instruction, with
1475 /// multiple instructions separated by SeparatorString or newlines.
1476 /// Variable-length instructions are not handled here; this function
1477 /// may be overloaded in the target code to do that.
1478 /// Hexagon counts the number of ##'s and adjusts for that many
1479 /// constant extenders.
1480 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1481  const MCAsmInfo &MAI) const {
1482  StringRef AStr(Str);
1483  // Count the number of instructions in the asm.
1484  bool atInsnStart = true;
1485  unsigned Length = 0;
1486  for (; *Str; ++Str) {
1487  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1488  strlen(MAI.getSeparatorString())) == 0)
1489  atInsnStart = true;
1490  if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
1491  Length += MAI.getMaxInstLength();
1492  atInsnStart = false;
1493  }
1494  if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1495  MAI.getCommentString().size()) == 0)
1496  atInsnStart = false;
1497  }
1498 
1499  // Add to size number of constant extenders seen * 4.
1500  StringRef Occ("##");
1501  Length += AStr.count(Occ)*4;
1502  return Length;
1503 }
1504 
1507  const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1508  if (UseDFAHazardRec)
1509  return new HexagonHazardRecognizer(II, this, Subtarget);
1511 }
1512 
1513 /// \brief For a comparison instruction, return the source registers in
1514 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
1515 /// compares against in \p Value. Return true if the comparison instruction
1516 /// can be analyzed.
1517 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1518  unsigned &SrcReg2, int &Mask,
1519  int &Value) const {
1520  unsigned Opc = MI.getOpcode();
1521 
1522  // Set mask and the first source register.
1523  switch (Opc) {
1524  case Hexagon::C2_cmpeq:
1525  case Hexagon::C2_cmpeqp:
1526  case Hexagon::C2_cmpgt:
1527  case Hexagon::C2_cmpgtp:
1528  case Hexagon::C2_cmpgtu:
1529  case Hexagon::C2_cmpgtup:
1530  case Hexagon::C4_cmpneq:
1531  case Hexagon::C4_cmplte:
1532  case Hexagon::C4_cmplteu:
1533  case Hexagon::C2_cmpeqi:
1534  case Hexagon::C2_cmpgti:
1535  case Hexagon::C2_cmpgtui:
1536  case Hexagon::C4_cmpneqi:
1537  case Hexagon::C4_cmplteui:
1538  case Hexagon::C4_cmpltei:
1539  SrcReg = MI.getOperand(1).getReg();
1540  Mask = ~0;
1541  break;
1542  case Hexagon::A4_cmpbeq:
1543  case Hexagon::A4_cmpbgt:
1544  case Hexagon::A4_cmpbgtu:
1545  case Hexagon::A4_cmpbeqi:
1546  case Hexagon::A4_cmpbgti:
1547  case Hexagon::A4_cmpbgtui:
1548  SrcReg = MI.getOperand(1).getReg();
1549  Mask = 0xFF;
1550  break;
1551  case Hexagon::A4_cmpheq:
1552  case Hexagon::A4_cmphgt:
1553  case Hexagon::A4_cmphgtu:
1554  case Hexagon::A4_cmpheqi:
1555  case Hexagon::A4_cmphgti:
1556  case Hexagon::A4_cmphgtui:
1557  SrcReg = MI.getOperand(1).getReg();
1558  Mask = 0xFFFF;
1559  break;
1560  }
1561 
1562  // Set the value/second source register.
1563  switch (Opc) {
1564  case Hexagon::C2_cmpeq:
1565  case Hexagon::C2_cmpeqp:
1566  case Hexagon::C2_cmpgt:
1567  case Hexagon::C2_cmpgtp:
1568  case Hexagon::C2_cmpgtu:
1569  case Hexagon::C2_cmpgtup:
1570  case Hexagon::A4_cmpbeq:
1571  case Hexagon::A4_cmpbgt:
1572  case Hexagon::A4_cmpbgtu:
1573  case Hexagon::A4_cmpheq:
1574  case Hexagon::A4_cmphgt:
1575  case Hexagon::A4_cmphgtu:
1576  case Hexagon::C4_cmpneq:
1577  case Hexagon::C4_cmplte:
1578  case Hexagon::C4_cmplteu:
1579  SrcReg2 = MI.getOperand(2).getReg();
1580  return true;
1581 
1582  case Hexagon::C2_cmpeqi:
1583  case Hexagon::C2_cmpgtui:
1584  case Hexagon::C2_cmpgti:
1585  case Hexagon::C4_cmpneqi:
1586  case Hexagon::C4_cmplteui:
1587  case Hexagon::C4_cmpltei:
1588  case Hexagon::A4_cmpbeqi:
1589  case Hexagon::A4_cmpbgti:
1590  case Hexagon::A4_cmpbgtui:
1591  case Hexagon::A4_cmpheqi:
1592  case Hexagon::A4_cmphgti:
1593  case Hexagon::A4_cmphgtui: {
1594  SrcReg2 = 0;
1595  const MachineOperand &Op2 = MI.getOperand(2);
1596  if (!Op2.isImm())
1597  return false;
1598  Value = MI.getOperand(2).getImm();
1599  return true;
1600  }
1601  }
1602 
1603  return false;
1604 }
1605 
1607  const MachineInstr &MI,
1608  unsigned *PredCost) const {
1609  return getInstrTimingClassLatency(ItinData, MI);
1610 }
1611 
1613  const TargetSubtargetInfo &STI) const {
1614  const InstrItineraryData *II = STI.getInstrItineraryData();
1615  return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1616 }
1617 
1618 // Inspired by this pair:
1619 // %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
1620 // S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1]
1621 // Currently AA considers the addresses in these instructions to be aliasing.
// Returns true when the two memory accesses provably cannot overlap:
// same base register, both offsets known immediates, and the address
// ranges [offset, offset+size) are disjoint.
1623  MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
  // Conservatively refuse instructions whose effects we cannot reason about.
1624  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1626  return false;
1627 
1628  // Instructions that are pure loads, not loads and stores like memops are not
1629  // dependent.
1630  if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1631  return true;
1632 
1633  // Get the base register in MIa.
1634  unsigned BasePosA, OffsetPosA;
1635  if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA))
1636  return false;
1637  const MachineOperand &BaseA = MIa.getOperand(BasePosA);
1638  unsigned BaseRegA = BaseA.getReg();
1639  unsigned BaseSubA = BaseA.getSubReg();
1640 
1641  // Get the base register in MIb.
1642  unsigned BasePosB, OffsetPosB;
1643  if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB))
1644  return false;
1645  const MachineOperand &BaseB = MIb.getOperand(BasePosB);
1646  unsigned BaseRegB = BaseB.getReg();
1647  unsigned BaseSubB = BaseB.getSubReg();
1648 
  // Different base (or sub-) registers: nothing can be concluded.
1649  if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
1650  return false;
1651 
1652  // Get the access sizes.
1653  unsigned SizeA = getMemAccessSize(MIa);
1654  unsigned SizeB = getMemAccessSize(MIb);
1655 
1656  // Get the offsets. Handle immediates only for now.
1657  const MachineOperand &OffA = MIa.getOperand(OffsetPosA);
1658  const MachineOperand &OffB = MIb.getOperand(OffsetPosB);
1659  if (!MIa.getOperand(OffsetPosA).isImm() ||
1660  !MIb.getOperand(OffsetPosB).isImm())
1661  return false;
  // A post-increment access addresses the base directly; its "offset"
  // operand is the increment, so treat the effective offset as zero.
1662  int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();
1663  int OffsetB = isPostIncrement(MIb) ? 0 : OffB.getImm();
1664 
1665  // This is a mem access with the same base register and known offsets from it.
1666  // Reason about it.
  // Disjoint iff the lower access ends at or before the higher one starts.
1667  if (OffsetA > OffsetB) {
1668  uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
1669  return SizeB <= OffDiff;
1670  }
1671  if (OffsetA < OffsetB) {
1672  uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
1673  return SizeA <= OffDiff;
1674  }
1675 
  // Equal offsets: the accesses overlap.
1676  return false;
1677 }
1678 
1679 /// If the instruction is an increment of a constant value, return the amount.
1681  int &Value) const {
  // Post-increment addressing: the increment is the offset operand, when
  // it is an immediate.
1682  if (isPostIncrement(MI)) {
1683  unsigned BasePos = 0, OffsetPos = 0;
1684  if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
1685  return false;
1686  const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
1687  if (OffsetOp.isImm()) {
1688  Value = OffsetOp.getImm();
1689  return true;
1690  }
  // Plain add-immediate: Rd = add(Rs, #imm) — operand 2 is the amount.
1691  } else if (MI.getOpcode() == Hexagon::A2_addi) {
1692  const MachineOperand &AddOp = MI.getOperand(2);
1693  if (AddOp.isImm()) {
1694  Value = AddOp.getImm();
1695  return true;
1696  }
1697  }
1698 
  // Not a recognized constant increment.
1699  return false;
1700 }
1701 
1702 std::pair<unsigned, unsigned>
1704  return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
1705  TF & HexagonII::MO_Bitmasks);
1706 }
1707 
1710  using namespace HexagonII;
1711 
1712  static const std::pair<unsigned, const char*> Flags[] = {
1713  {MO_PCREL, "hexagon-pcrel"},
1714  {MO_GOT, "hexagon-got"},
1715  {MO_LO16, "hexagon-lo16"},
1716  {MO_HI16, "hexagon-hi16"},
1717  {MO_GPREL, "hexagon-gprel"},
1718  {MO_GDGOT, "hexagon-gdgot"},
1719  {MO_GDPLT, "hexagon-gdplt"},
1720  {MO_IE, "hexagon-ie"},
1721  {MO_IEGOT, "hexagon-iegot"},
1722  {MO_TPREL, "hexagon-tprel"}
1723  };
1724  return makeArrayRef(Flags);
1725 }
1726 
1729  using namespace HexagonII;
1730 
1731  static const std::pair<unsigned, const char*> Flags[] = {
1732  {HMOTF_ConstExtended, "hexagon-ext"}
1733  };
1734  return makeArrayRef(Flags);
1735 }
1736 
1739  const TargetRegisterClass *TRC;
1740  if (VT == MVT::i1) {
1741  TRC = &Hexagon::PredRegsRegClass;
1742  } else if (VT == MVT::i32 || VT == MVT::f32) {
1743  TRC = &Hexagon::IntRegsRegClass;
1744  } else if (VT == MVT::i64 || VT == MVT::f64) {
1745  TRC = &Hexagon::DoubleRegsRegClass;
1746  } else {
1747  llvm_unreachable("Cannot handle this register class");
1748  }
1749 
1750  unsigned NewReg = MRI.createVirtualRegister(TRC);
1751  return NewReg;
1752 }
1753 
1755  return (getAddrMode(MI) == HexagonII::AbsoluteSet);
1756 }
1757 
1759  const uint64_t F = MI.getDesc().TSFlags;
1761 }
1762 
1764  return !isTC1(MI) && !isTC2Early(MI) && !MI.getDesc().mayLoad() &&
1765  !MI.getDesc().mayStore() &&
1766  MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
1767  MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
1768  !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
1769 }
1770 
1771 // Return true if the instruction is a compound branch instruction.
1773  return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
1774 }
1775 
1776 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
1777 // isFPImm and later getFPImm as well.
1779  const uint64_t F = MI.getDesc().TSFlags;
1781  if (isExtended) // Instruction must be extended.
1782  return true;
1783 
1784  unsigned isExtendable =
1786  if (!isExtendable)
1787  return false;
1788 
1789  if (MI.isCall())
1790  return false;
1791 
1792  short ExtOpNum = getCExtOpNum(MI);
1793  const MachineOperand &MO = MI.getOperand(ExtOpNum);
1794  // Use MO operand flags to determine if MO
1795  // has the HMOTF_ConstExtended flag set.
1797  return true;
1798  // If this is a Machine BB address we are talking about, and it is
1799  // not marked as extended, say so.
1800  if (MO.isMBB())
1801  return false;
1802 
1803  // We could be using an instruction with an extendable immediate and shoehorn
1804  // a global address into it. If it is a global address it will be constant
1805  // extended. We do this for COMBINE.
1806  if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
1807  MO.isJTI() || MO.isCPI() || MO.isFPImm())
1808  return true;
1809 
1810  // If the extendable operand is not 'Immediate' type, the instruction should
1811  // have 'isExtended' flag set.
1812  assert(MO.isImm() && "Extendable operand must be Immediate type");
1813 
1814  int MinValue = getMinValue(MI);
1815  int MaxValue = getMaxValue(MI);
1816  int ImmValue = MO.getImm();
1817 
1818  return (ImmValue < MinValue || ImmValue > MaxValue);
1819 }
1820 
1822  switch (MI.getOpcode()) {
1823  case Hexagon::L4_return:
1824  case Hexagon::L4_return_t:
1825  case Hexagon::L4_return_f:
1826  case Hexagon::L4_return_tnew_pnt:
1827  case Hexagon::L4_return_fnew_pnt:
1828  case Hexagon::L4_return_tnew_pt:
1829  case Hexagon::L4_return_fnew_pt:
1830  return true;
1831  }
1832  return false;
1833 }
1834 
1835 // Return true when ConsMI uses a register defined by ProdMI.
1837  const MachineInstr &ConsMI) const {
1838  if (!ProdMI.getDesc().getNumDefs())
1839  return false;
1840  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1841 
1846 
1847  parseOperands(ProdMI, DefsA, UsesA);
1848  parseOperands(ConsMI, DefsB, UsesB);
1849 
1850  for (auto &RegA : DefsA)
1851  for (auto &RegB : UsesB) {
1852  // True data dependency.
1853  if (RegA == RegB)
1854  return true;
1855 
1857  for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
1858  if (RegB == *SubRegs)
1859  return true;
1860 
1862  for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
1863  if (RegA == *SubRegs)
1864  return true;
1865  }
1866 
1867  return false;
1868 }
1869 
1870 // Returns true if the instruction is already a .cur.
1872  switch (MI.getOpcode()) {
1873  case Hexagon::V6_vL32b_cur_pi:
1874  case Hexagon::V6_vL32b_cur_ai:
1875  return true;
1876  }
1877  return false;
1878 }
1879 
1880 // Returns true, if any one of the operands is a dot new
1881 // insn, whether it is predicated dot new or register dot new.
1883  if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
1884  return true;
1885 
1886  return false;
1887 }
1888 
1889 /// Symmetrical. See if these two instructions are fit for duplex pair.
1891  const MachineInstr &MIb) const {
1894  return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
1895 }
1896 
1898  if (MI.mayLoad() || MI.mayStore() || MI.isCompare())
1899  return true;
1900 
1901  // Multiply
1902  unsigned SchedClass = MI.getDesc().getSchedClass();
1903  return is_TC4x(SchedClass) || is_TC3x(SchedClass);
1904 }
1905 
1906 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
1907  return (Opcode == Hexagon::ENDLOOP0 ||
1908  Opcode == Hexagon::ENDLOOP1);
1909 }
1910 
1911 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
1912  switch(OpType) {
1919  return true;
1920  default:
1921  return false;
1922  }
1923 }
1924 
1926  const MCInstrDesc &MID = MI.getDesc();
1927  const uint64_t F = MID.TSFlags;
1929  return true;
1930 
1931  // TODO: This is largely obsolete now. Will need to be removed
1932  // in consecutive patches.
1933  switch (MI.getOpcode()) {
1934  // PS_fi and PS_fia remain special cases.
1935  case Hexagon::PS_fi:
1936  case Hexagon::PS_fia:
1937  return true;
1938  default:
1939  return false;
1940  }
1941  return false;
1942 }
1943 
1944 // This returns true in two cases:
1945 // - The OP code itself indicates that this is an extended instruction.
1946 // - One of MOs has been marked with HMOTF_ConstExtended flag.
1948  // First check if this is permanently extended op code.
1949  const uint64_t F = MI.getDesc().TSFlags;
1951  return true;
1952  // Use MO operand flags to determine if one of MI's operands
1953  // has HMOTF_ConstExtended flag set.
1954  for (const MachineOperand &MO : MI.operands())
1955  if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
1956  return true;
1957  return false;
1958 }
1959 
1961  unsigned Opcode = MI.getOpcode();
1962  const uint64_t F = get(Opcode).TSFlags;
1963  return (F >> HexagonII::FPPos) & HexagonII::FPMask;
1964 }
1965 
1966 // No V60 HVX VMEM with A_INDIRECT.
1968  const MachineInstr &J) const {
1969  if (!isHVXVec(I))
1970  return false;
1971  if (!I.mayLoad() && !I.mayStore())
1972  return false;
1973  return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
1974 }
1975 
1977  switch (MI.getOpcode()) {
1978  case Hexagon::J2_callr:
1979  case Hexagon::J2_callrf:
1980  case Hexagon::J2_callrt:
1981  case Hexagon::PS_call_nr:
1982  return true;
1983  }
1984  return false;
1985 }
1986 
1988  switch (MI.getOpcode()) {
1989  case Hexagon::L4_return:
1990  case Hexagon::L4_return_t:
1991  case Hexagon::L4_return_f:
1992  case Hexagon::L4_return_fnew_pnt:
1993  case Hexagon::L4_return_fnew_pt:
1994  case Hexagon::L4_return_tnew_pnt:
1995  case Hexagon::L4_return_tnew_pt:
1996  return true;
1997  }
1998  return false;
1999 }
2000 
2002  switch (MI.getOpcode()) {
2003  case Hexagon::J2_jumpr:
2004  case Hexagon::J2_jumprt:
2005  case Hexagon::J2_jumprf:
2006  case Hexagon::J2_jumprtnewpt:
2007  case Hexagon::J2_jumprfnewpt:
2008  case Hexagon::J2_jumprtnew:
2009  case Hexagon::J2_jumprfnew:
2010  return true;
2011  }
2012  return false;
2013 }
2014 
2015 // Return true if a given MI can accommodate given offset.
2016 // Use abs estimate as oppose to the exact number.
2017 // TODO: This will need to be changed to use MC level
2018 // definition of instruction extendable field size.
2020  unsigned offset) const {
2021  // This selection of jump instructions matches to that what
2022  // analyzeBranch can parse, plus NVJ.
2023  if (isNewValueJump(MI)) // r9:2
2024  return isInt<11>(offset);
2025 
2026  switch (MI.getOpcode()) {
2027  // Still missing Jump to address condition on register value.
2028  default:
2029  return false;
2030  case Hexagon::J2_jump: // bits<24> dst; // r22:2
2031  case Hexagon::J2_call:
2032  case Hexagon::PS_call_nr:
2033  return isInt<24>(offset);
2034  case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2035  case Hexagon::J2_jumpf:
2036  case Hexagon::J2_jumptnew:
2037  case Hexagon::J2_jumptnewpt:
2038  case Hexagon::J2_jumpfnew:
2039  case Hexagon::J2_jumpfnewpt:
2040  case Hexagon::J2_callt:
2041  case Hexagon::J2_callf:
2042  return isInt<17>(offset);
2043  case Hexagon::J2_loop0i:
2044  case Hexagon::J2_loop0iext:
2045  case Hexagon::J2_loop0r:
2046  case Hexagon::J2_loop0rext:
2047  case Hexagon::J2_loop1i:
2048  case Hexagon::J2_loop1iext:
2049  case Hexagon::J2_loop1r:
2050  case Hexagon::J2_loop1rext:
2051  return isInt<9>(offset);
2052  // TODO: Add all the compound branches here. Can we do this in Relation model?
2053  case Hexagon::J4_cmpeqi_tp0_jump_nt:
2054  case Hexagon::J4_cmpeqi_tp1_jump_nt:
2055  return isInt<11>(offset);
2056  }
2057 }
2058 
2060  const MachineInstr &ESMI) const {
2061  bool isLate = isLateResultInstr(LRMI);
2062  bool isEarly = isEarlySourceInstr(ESMI);
2063 
2064  DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
2065  DEBUG(LRMI.dump());
2066  DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
2067  DEBUG(ESMI.dump());
2068 
2069  if (isLate && isEarly) {
2070  DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
2071  return true;
2072  }
2073 
2074  return false;
2075 }
2076 
2078  switch (MI.getOpcode()) {
2079  case TargetOpcode::EXTRACT_SUBREG:
2080  case TargetOpcode::INSERT_SUBREG:
2081  case TargetOpcode::SUBREG_TO_REG:
2082  case TargetOpcode::REG_SEQUENCE:
2083  case TargetOpcode::IMPLICIT_DEF:
2084  case TargetOpcode::COPY:
2086  case TargetOpcode::PHI:
2087  return false;
2088  default:
2089  break;
2090  }
2091 
2092  unsigned SchedClass = MI.getDesc().getSchedClass();
2093  return !is_TC1(SchedClass);
2094 }
2095 
2097  // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
2098  // resource, but all operands can be received late like an ALU instruction.
2099  return getType(MI) == HexagonII::TypeCVI_VX_LATE;
2100 }
2101 
2103  unsigned Opcode = MI.getOpcode();
2104  return Opcode == Hexagon::J2_loop0i ||
2105  Opcode == Hexagon::J2_loop0r ||
2106  Opcode == Hexagon::J2_loop0iext ||
2107  Opcode == Hexagon::J2_loop0rext ||
2108  Opcode == Hexagon::J2_loop1i ||
2109  Opcode == Hexagon::J2_loop1r ||
2110  Opcode == Hexagon::J2_loop1iext ||
2111  Opcode == Hexagon::J2_loop1rext;
2112 }
2113 
2115  switch (MI.getOpcode()) {
2116  default: return false;
2117  case Hexagon::L4_iadd_memopw_io:
2118  case Hexagon::L4_isub_memopw_io:
2119  case Hexagon::L4_add_memopw_io:
2120  case Hexagon::L4_sub_memopw_io:
2121  case Hexagon::L4_and_memopw_io:
2122  case Hexagon::L4_or_memopw_io:
2123  case Hexagon::L4_iadd_memoph_io:
2124  case Hexagon::L4_isub_memoph_io:
2125  case Hexagon::L4_add_memoph_io:
2126  case Hexagon::L4_sub_memoph_io:
2127  case Hexagon::L4_and_memoph_io:
2128  case Hexagon::L4_or_memoph_io:
2129  case Hexagon::L4_iadd_memopb_io:
2130  case Hexagon::L4_isub_memopb_io:
2131  case Hexagon::L4_add_memopb_io:
2132  case Hexagon::L4_sub_memopb_io:
2133  case Hexagon::L4_and_memopb_io:
2134  case Hexagon::L4_or_memopb_io:
2135  case Hexagon::L4_ior_memopb_io:
2136  case Hexagon::L4_ior_memoph_io:
2137  case Hexagon::L4_ior_memopw_io:
2138  case Hexagon::L4_iand_memopb_io:
2139  case Hexagon::L4_iand_memoph_io:
2140  case Hexagon::L4_iand_memopw_io:
2141  return true;
2142  }
2143  return false;
2144 }
2145 
2147  const uint64_t F = MI.getDesc().TSFlags;
2149 }
2150 
2151 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2152  const uint64_t F = get(Opcode).TSFlags;
2154 }
2155 
2157  return isNewValueJump(MI) || isNewValueStore(MI);
2158 }
2159 
2161  return isNewValue(MI) && MI.isBranch();
2162 }
2163 
2164 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2165  return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2166 }
2167 
2169  const uint64_t F = MI.getDesc().TSFlags;
2171 }
2172 
2173 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2174  const uint64_t F = get(Opcode).TSFlags;
2176 }
2177 
2178 // Returns true if a particular operand is extendable for an instruction.
2180  unsigned OperandNum) const {
2181  const uint64_t F = MI.getDesc().TSFlags;
2183  == OperandNum;
2184 }
2185 
2187  const uint64_t F = MI.getDesc().TSFlags;
2188  assert(isPredicated(MI));
2190 }
2191 
2192 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2193  const uint64_t F = get(Opcode).TSFlags;
2194  assert(isPredicated(Opcode));
2196 }
2197 
2199  const uint64_t F = MI.getDesc().TSFlags;
2200  return !((F >> HexagonII::PredicatedFalsePos) &
2202 }
2203 
2204 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2205  const uint64_t F = get(Opcode).TSFlags;
2206  // Make sure that the instruction is predicated.
2208  return !((F >> HexagonII::PredicatedFalsePos) &
2210 }
2211 
2212 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2213  const uint64_t F = get(Opcode).TSFlags;
2215 }
2216 
2217 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2218  const uint64_t F = get(Opcode).TSFlags;
2220 }
2221 
2222 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2223  const uint64_t F = get(Opcode).TSFlags;
2224  assert(get(Opcode).isBranch() &&
2225  (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2226  return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2227 }
2228 
2230  return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2231  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2232  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2233  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2234 }
2235 
2237  switch (MI.getOpcode()) {
2238  // Byte
2239  case Hexagon::L2_loadrb_io:
2240  case Hexagon::L4_loadrb_ur:
2241  case Hexagon::L4_loadrb_ap:
2242  case Hexagon::L2_loadrb_pr:
2243  case Hexagon::L2_loadrb_pbr:
2244  case Hexagon::L2_loadrb_pi:
2245  case Hexagon::L2_loadrb_pci:
2246  case Hexagon::L2_loadrb_pcr:
2247  case Hexagon::L2_loadbsw2_io:
2248  case Hexagon::L4_loadbsw2_ur:
2249  case Hexagon::L4_loadbsw2_ap:
2250  case Hexagon::L2_loadbsw2_pr:
2251  case Hexagon::L2_loadbsw2_pbr:
2252  case Hexagon::L2_loadbsw2_pi:
2253  case Hexagon::L2_loadbsw2_pci:
2254  case Hexagon::L2_loadbsw2_pcr:
2255  case Hexagon::L2_loadbsw4_io:
2256  case Hexagon::L4_loadbsw4_ur:
2257  case Hexagon::L4_loadbsw4_ap:
2258  case Hexagon::L2_loadbsw4_pr:
2259  case Hexagon::L2_loadbsw4_pbr:
2260  case Hexagon::L2_loadbsw4_pi:
2261  case Hexagon::L2_loadbsw4_pci:
2262  case Hexagon::L2_loadbsw4_pcr:
2263  case Hexagon::L4_loadrb_rr:
2264  case Hexagon::L2_ploadrbt_io:
2265  case Hexagon::L2_ploadrbt_pi:
2266  case Hexagon::L2_ploadrbf_io:
2267  case Hexagon::L2_ploadrbf_pi:
2268  case Hexagon::L2_ploadrbtnew_io:
2269  case Hexagon::L2_ploadrbfnew_io:
2270  case Hexagon::L4_ploadrbt_rr:
2271  case Hexagon::L4_ploadrbf_rr:
2272  case Hexagon::L4_ploadrbtnew_rr:
2273  case Hexagon::L4_ploadrbfnew_rr:
2274  case Hexagon::L2_ploadrbtnew_pi:
2275  case Hexagon::L2_ploadrbfnew_pi:
2276  case Hexagon::L4_ploadrbt_abs:
2277  case Hexagon::L4_ploadrbf_abs:
2278  case Hexagon::L4_ploadrbtnew_abs:
2279  case Hexagon::L4_ploadrbfnew_abs:
2280  case Hexagon::L2_loadrbgp:
2281  // Half
2282  case Hexagon::L2_loadrh_io:
2283  case Hexagon::L4_loadrh_ur:
2284  case Hexagon::L4_loadrh_ap:
2285  case Hexagon::L2_loadrh_pr:
2286  case Hexagon::L2_loadrh_pbr:
2287  case Hexagon::L2_loadrh_pi:
2288  case Hexagon::L2_loadrh_pci:
2289  case Hexagon::L2_loadrh_pcr:
2290  case Hexagon::L4_loadrh_rr:
2291  case Hexagon::L2_ploadrht_io:
2292  case Hexagon::L2_ploadrht_pi:
2293  case Hexagon::L2_ploadrhf_io:
2294  case Hexagon::L2_ploadrhf_pi:
2295  case Hexagon::L2_ploadrhtnew_io:
2296  case Hexagon::L2_ploadrhfnew_io:
2297  case Hexagon::L4_ploadrht_rr:
2298  case Hexagon::L4_ploadrhf_rr:
2299  case Hexagon::L4_ploadrhtnew_rr:
2300  case Hexagon::L4_ploadrhfnew_rr:
2301  case Hexagon::L2_ploadrhtnew_pi:
2302  case Hexagon::L2_ploadrhfnew_pi:
2303  case Hexagon::L4_ploadrht_abs:
2304  case Hexagon::L4_ploadrhf_abs:
2305  case Hexagon::L4_ploadrhtnew_abs:
2306  case Hexagon::L4_ploadrhfnew_abs:
2307  case Hexagon::L2_loadrhgp:
2308  return true;
2309  default:
2310  return false;
2311  }
2312 }
2313 
2315  const uint64_t F = MI.getDesc().TSFlags;
2316  return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2317 }
2318 
2320  switch (MI.getOpcode()) {
2321  case Hexagon::STriw_pred:
2322  case Hexagon::LDriw_pred:
2323  return true;
2324  default:
2325  return false;
2326  }
2327 }
2328 
2330  if (!MI.isBranch())
2331  return false;
2332 
2333  for (auto &Op : MI.operands())
2334  if (Op.isGlobal() || Op.isSymbol())
2335  return true;
2336  return false;
2337 }
2338 
2339 // Returns true when SU has a timing class TC1.
2341  unsigned SchedClass = MI.getDesc().getSchedClass();
2342  return is_TC1(SchedClass);
2343 }
2344 
2346  unsigned SchedClass = MI.getDesc().getSchedClass();
2347  return is_TC2(SchedClass);
2348 }
2349 
2351  unsigned SchedClass = MI.getDesc().getSchedClass();
2352  return is_TC2early(SchedClass);
2353 }
2354 
2356  unsigned SchedClass = MI.getDesc().getSchedClass();
2357  return is_TC4x(SchedClass);
2358 }
2359 
2360 // Schedule this ASAP.
2362  const MachineInstr &MI2) const {
2363  if (mayBeCurLoad(MI1)) {
2364  // if (result of SU is used in Next) return true;
2365  unsigned DstReg = MI1.getOperand(0).getReg();
2366  int N = MI2.getNumOperands();
2367  for (int I = 0; I < N; I++)
2368  if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2369  return true;
2370  }
2371  if (mayBeNewStore(MI2))
2372  if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2373  if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2374  MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2375  return true;
2376  return false;
2377 }
2378 
2380  const uint64_t V = getType(MI);
2382 }
2383 
2384 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
2386  int Size = VT.getSizeInBits() / 8;
2387  if (Offset % Size != 0)
2388  return false;
2389  int Count = Offset / Size;
2390 
2391  switch (VT.getSimpleVT().SimpleTy) {
2392  // For scalars the auto-inc is s4
2393  case MVT::i8:
2394  case MVT::i16:
2395  case MVT::i32:
2396  case MVT::i64:
2397  return isInt<4>(Count);
2398  // For HVX vectors the auto-inc is s3
2399  case MVT::v64i8:
2400  case MVT::v32i16:
2401  case MVT::v16i32:
2402  case MVT::v8i64:
2403  case MVT::v128i8:
2404  case MVT::v64i16:
2405  case MVT::v32i32:
2406  case MVT::v16i64:
2407  return isInt<3>(Count);
2408  default:
2409  break;
2410  }
2411 
2412  llvm_unreachable("Not an valid type!");
2413 }
2414 
// Returns true if "Offset" fits the immediate-offset field of "Opcode".
// The first switch handles opcodes whose range never changes (their offsets
// are not constant-extendable); only after it does "Extend" short-circuit
// the check, because an extended immediate covers any 32-bit offset.
bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
      const TargetRegisterInfo *TRI, bool Extend) const {
  // This function is to check whether the "Offset" is in the correct range of
  // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
  // inserted to calculate the final address. Due to this reason, the function
  // assumes that the "Offset" has correct alignment.
  // We used to assert if the offset was not properly aligned, however,
  // there are cases where a misaligned pointer recast can cause this
  // problem, and we need to allow for it. The front end warns of such
  // misaligns with respect to load size.
  switch (Opcode) {
  case Hexagon::PS_vstorerq_ai:
  case Hexagon::PS_vstorerw_ai:
  case Hexagon::PS_vstorerw_nt_ai:
  case Hexagon::PS_vloadrq_ai:
  case Hexagon::PS_vloadrw_ai:
  case Hexagon::PS_vloadrw_nt_ai:
  case Hexagon::V6_vL32b_ai:
  case Hexagon::V6_vS32b_ai:
  case Hexagon::V6_vL32b_nt_ai:
  case Hexagon::V6_vS32b_nt_ai:
  case Hexagon::V6_vL32Ub_ai:
  case Hexagon::V6_vS32Ub_ai: {
    // HVX memory ops: the offset must be vector-aligned, and the encoded
    // immediate is the offset scaled down by the vector size (s4 field).
    unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
    assert(isPowerOf2_32(VectorSize));
    if (Offset & (VectorSize-1))
      return false;
    return isInt<4>(Offset >> Log2_32(VectorSize));
  }

  // Hardware loop setup: unsigned 10-bit iteration count.
  case Hexagon::J2_loop0i:
  case Hexagon::J2_loop1i:
    return isUInt<10>(Offset);

  // Store-immediate to memory: u6 scaled by the access size (0/1/2 bits).
  case Hexagon::S4_storeirb_io:
  case Hexagon::S4_storeirbt_io:
  case Hexagon::S4_storeirbf_io:
    return isUInt<6>(Offset);

  case Hexagon::S4_storeirh_io:
  case Hexagon::S4_storeirht_io:
  case Hexagon::S4_storeirhf_io:
    return isShiftedUInt<6,1>(Offset);

  case Hexagon::S4_storeiri_io:
  case Hexagon::S4_storeirit_io:
  case Hexagon::S4_storeirif_io:
    return isShiftedUInt<6,2>(Offset);
  }

  // For the remaining opcodes a constant-extender removes any range limit.
  if (Extend)
    return true;

  switch (Opcode) {
  // Plain base+offset loads/stores, bounded per access size.
  case Hexagon::L2_loadri_io:
  case Hexagon::S2_storeri_io:
    return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMW_OFFSET_MAX);

  case Hexagon::L2_loadrd_io:
  case Hexagon::S2_storerd_io:
    return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMD_OFFSET_MAX);

  case Hexagon::L2_loadrh_io:
  case Hexagon::L2_loadruh_io:
  case Hexagon::S2_storerh_io:
  case Hexagon::S2_storerf_io:
    return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMH_OFFSET_MAX);

  case Hexagon::L2_loadrb_io:
  case Hexagon::L2_loadrub_io:
  case Hexagon::S2_storerb_io:
    return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMB_OFFSET_MAX);

  case Hexagon::A2_addi:
    return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
      (Offset <= Hexagon_ADDI_OFFSET_MAX);

  // Memops: unsigned offset scaled by access size (word/half/byte).
  case Hexagon::L4_iadd_memopw_io:
  case Hexagon::L4_isub_memopw_io:
  case Hexagon::L4_add_memopw_io:
  case Hexagon::L4_sub_memopw_io:
  case Hexagon::L4_and_memopw_io:
  case Hexagon::L4_or_memopw_io:
    return (0 <= Offset && Offset <= 255);

  case Hexagon::L4_iadd_memoph_io:
  case Hexagon::L4_isub_memoph_io:
  case Hexagon::L4_add_memoph_io:
  case Hexagon::L4_sub_memoph_io:
  case Hexagon::L4_and_memoph_io:
  case Hexagon::L4_or_memoph_io:
    return (0 <= Offset && Offset <= 127);

  case Hexagon::L4_iadd_memopb_io:
  case Hexagon::L4_isub_memopb_io:
  case Hexagon::L4_add_memopb_io:
  case Hexagon::L4_sub_memopb_io:
  case Hexagon::L4_and_memopb_io:
  case Hexagon::L4_or_memopb_io:
    return (0 <= Offset && Offset <= 63);

  // LDriw_xxx and STriw_xxx are pseudo operations, so it has to take offset of
  // any size. Later pass knows how to handle it.
  case Hexagon::STriw_pred:
  case Hexagon::LDriw_pred:
  case Hexagon::STriw_mod:
  case Hexagon::LDriw_mod:
    return true;

  // Frame-index pseudos and inline asm are resolved/checked later.
  case Hexagon::PS_fi:
  case Hexagon::PS_fia:
  case Hexagon::INLINEASM:
    return true;

  // Predicated loads/stores: u6 scaled by access size.
  case Hexagon::L2_ploadrbt_io:
  case Hexagon::L2_ploadrbf_io:
  case Hexagon::L2_ploadrubt_io:
  case Hexagon::L2_ploadrubf_io:
  case Hexagon::S2_pstorerbt_io:
  case Hexagon::S2_pstorerbf_io:
    return isUInt<6>(Offset);

  case Hexagon::L2_ploadrht_io:
  case Hexagon::L2_ploadrhf_io:
  case Hexagon::L2_ploadruht_io:
  case Hexagon::L2_ploadruhf_io:
  case Hexagon::S2_pstorerht_io:
  case Hexagon::S2_pstorerhf_io:
    return isShiftedUInt<6,1>(Offset);

  case Hexagon::L2_ploadrit_io:
  case Hexagon::L2_ploadrif_io:
  case Hexagon::S2_pstorerit_io:
  case Hexagon::S2_pstorerif_io:
    return isShiftedUInt<6,2>(Offset);

  case Hexagon::L2_ploadrdt_io:
  case Hexagon::L2_ploadrdf_io:
  case Hexagon::S2_pstorerdt_io:
  case Hexagon::S2_pstorerdf_io:
    return isShiftedUInt<6,3>(Offset);
  } // switch

  llvm_unreachable("No offset range is defined for this opcode. "
                   "Please define it in the above switch statement!");
}
2565 
2567  return isHVXVec(MI) && isAccumulator(MI);
2568 }
2569 
2571  const uint64_t F = get(MI.getOpcode()).TSFlags;
2572  const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2573  return
2574  V == HexagonII::TypeCVI_VA ||
2576 }
2577 
2579  const MachineInstr &ConsMI) const {
2580  if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2581  return true;
2582 
2583  if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2584  return true;
2585 
2586  if (mayBeNewStore(ConsMI))
2587  return true;
2588 
2589  return false;
2590 }
2591 
2593  switch (MI.getOpcode()) {
2594  // Byte
2595  case Hexagon::L2_loadrub_io:
2596  case Hexagon::L4_loadrub_ur:
2597  case Hexagon::L4_loadrub_ap:
2598  case Hexagon::L2_loadrub_pr:
2599  case Hexagon::L2_loadrub_pbr:
2600  case Hexagon::L2_loadrub_pi:
2601  case Hexagon::L2_loadrub_pci:
2602  case Hexagon::L2_loadrub_pcr:
2603  case Hexagon::L2_loadbzw2_io:
2604  case Hexagon::L4_loadbzw2_ur:
2605  case Hexagon::L4_loadbzw2_ap:
2606  case Hexagon::L2_loadbzw2_pr:
2607  case Hexagon::L2_loadbzw2_pbr:
2608  case Hexagon::L2_loadbzw2_pi:
2609  case Hexagon::L2_loadbzw2_pci:
2610  case Hexagon::L2_loadbzw2_pcr:
2611  case Hexagon::L2_loadbzw4_io:
2612  case Hexagon::L4_loadbzw4_ur:
2613  case Hexagon::L4_loadbzw4_ap:
2614  case Hexagon::L2_loadbzw4_pr:
2615  case Hexagon::L2_loadbzw4_pbr:
2616  case Hexagon::L2_loadbzw4_pi:
2617  case Hexagon::L2_loadbzw4_pci:
2618  case Hexagon::L2_loadbzw4_pcr:
2619  case Hexagon::L4_loadrub_rr:
2620  case Hexagon::L2_ploadrubt_io:
2621  case Hexagon::L2_ploadrubt_pi:
2622  case Hexagon::L2_ploadrubf_io:
2623  case Hexagon::L2_ploadrubf_pi:
2624  case Hexagon::L2_ploadrubtnew_io:
2625  case Hexagon::L2_ploadrubfnew_io:
2626  case Hexagon::L4_ploadrubt_rr:
2627  case Hexagon::L4_ploadrubf_rr:
2628  case Hexagon::L4_ploadrubtnew_rr:
2629  case Hexagon::L4_ploadrubfnew_rr:
2630  case Hexagon::L2_ploadrubtnew_pi:
2631  case Hexagon::L2_ploadrubfnew_pi:
2632  case Hexagon::L4_ploadrubt_abs:
2633  case Hexagon::L4_ploadrubf_abs:
2634  case Hexagon::L4_ploadrubtnew_abs:
2635  case Hexagon::L4_ploadrubfnew_abs:
2636  case Hexagon::L2_loadrubgp:
2637  // Half
2638  case Hexagon::L2_loadruh_io:
2639  case Hexagon::L4_loadruh_ur:
2640  case Hexagon::L4_loadruh_ap:
2641  case Hexagon::L2_loadruh_pr:
2642  case Hexagon::L2_loadruh_pbr:
2643  case Hexagon::L2_loadruh_pi:
2644  case Hexagon::L2_loadruh_pci:
2645  case Hexagon::L2_loadruh_pcr:
2646  case Hexagon::L4_loadruh_rr:
2647  case Hexagon::L2_ploadruht_io:
2648  case Hexagon::L2_ploadruht_pi:
2649  case Hexagon::L2_ploadruhf_io:
2650  case Hexagon::L2_ploadruhf_pi:
2651  case Hexagon::L2_ploadruhtnew_io:
2652  case Hexagon::L2_ploadruhfnew_io:
2653  case Hexagon::L4_ploadruht_rr:
2654  case Hexagon::L4_ploadruhf_rr:
2655  case Hexagon::L4_ploadruhtnew_rr:
2656  case Hexagon::L4_ploadruhfnew_rr:
2657  case Hexagon::L2_ploadruhtnew_pi:
2658  case Hexagon::L2_ploadruhfnew_pi:
2659  case Hexagon::L4_ploadruht_abs:
2660  case Hexagon::L4_ploadruhf_abs:
2661  case Hexagon::L4_ploadruhtnew_abs:
2662  case Hexagon::L4_ploadruhfnew_abs:
2663  case Hexagon::L2_loadruhgp:
2664  return true;
2665  default:
2666  return false;
2667  }
2668 }
2669 
2670 // Add latency to instruction.
2672  const MachineInstr &MI2) const {
2673  if (isHVXVec(MI1) && isHVXVec(MI2))
2674  if (!isVecUsableNextPacket(MI1, MI2))
2675  return true;
2676  return false;
2677 }
2678 
2679 /// \brief Get the base register and byte offset of a load/store instr.
2681  unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI)
2682  const {
2683  unsigned AccessSize = 0;
2684  int OffsetVal = 0;
2685  BaseReg = getBaseAndOffset(LdSt, OffsetVal, AccessSize);
2686  Offset = OffsetVal;
2687  return BaseReg != 0;
2688 }
2689 
2690 /// \brief Can these instructions execute at the same time in a bundle.
2692  const MachineInstr &Second) const {
2693  if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
2694  const MachineOperand &Op = Second.getOperand(0);
2695  if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
2696  return true;
2697  }
2698  if (DisableNVSchedule)
2699  return false;
2700  if (mayBeNewStore(Second)) {
2701  // Make sure the definition of the first instruction is the value being
2702  // stored.
2703  const MachineOperand &Stored =
2704  Second.getOperand(Second.getNumOperands() - 1);
2705  if (!Stored.isReg())
2706  return false;
2707  for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
2708  const MachineOperand &Op = First.getOperand(i);
2709  if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
2710  return true;
2711  }
2712  }
2713  return false;
2714 }
2715 
2717  unsigned Opc = CallMI.getOpcode();
2718  return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
2719 }
2720 
2722  for (auto &I : *B)
2723  if (I.isEHLabel())
2724  return true;
2725  return false;
2726 }
2727 
2728 // Returns true if an instruction can be converted into a non-extended
2729 // equivalent instruction.
2731  short NonExtOpcode;
2732  // Check if the instruction has a register form that uses register in place
2733  // of the extended operand, if so return that as the non-extended form.
2734  if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
2735  return true;
2736 
2737  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
2738  // Check addressing mode and retrieve non-ext equivalent instruction.
2739 
2740  switch (getAddrMode(MI)) {
2741  case HexagonII::Absolute:
2742  // Load/store with absolute addressing mode can be converted into
2743  // base+offset mode.
2744  NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
2745  break;
2747  // Load/store with base+offset addressing mode can be converted into
2748  // base+register offset addressing mode. However left shift operand should
2749  // be set to 0.
2750  NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
2751  break;
2753  NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
2754  break;
2755  default:
2756  return false;
2757  }
2758  if (NonExtOpcode < 0)
2759  return false;
2760  return true;
2761  }
2762  return false;
2763 }
2764 
2766  return Hexagon::getRealHWInstr(MI.getOpcode(),
2767  Hexagon::InstrType_Pseudo) >= 0;
2768 }
2769 
2771  const {
2773  while (I != E) {
2774  if (I->isBarrier())
2775  return true;
2776  ++I;
2777  }
2778  return false;
2779 }
2780 
2781 // Returns true, if a LD insn can be promoted to a cur load.
2783  const uint64_t F = MI.getDesc().TSFlags;
2785  Subtarget.hasV60TOps();
2786 }
2787 
2788 // Returns true, if a ST insn can be promoted to a new-value store.
2790  const uint64_t F = MI.getDesc().TSFlags;
2792 }
2793 
2795  const MachineInstr &ConsMI) const {
2796  // There is no stall when ProdMI is not a V60 vector.
2797  if (!isHVXVec(ProdMI))
2798  return false;
2799 
2800  // There is no stall when ProdMI and ConsMI are not dependent.
2801  if (!isDependent(ProdMI, ConsMI))
2802  return false;
2803 
2804  // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
2805  // are scheduled in consecutive packets.
2806  if (isVecUsableNextPacket(ProdMI, ConsMI))
2807  return false;
2808 
2809  return true;
2810 }
2811 
2814  // There is no stall when I is not a V60 vector.
2815  if (!isHVXVec(MI))
2816  return false;
2817 
2819  MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
2820 
2821  if (!(*MII).isBundle()) {
2822  const MachineInstr &J = *MII;
2823  return producesStall(J, MI);
2824  }
2825 
2826  for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
2827  const MachineInstr &J = *MII;
2828  if (producesStall(J, MI))
2829  return true;
2830  }
2831  return false;
2832 }
2833 
2835  unsigned PredReg) const {
2836  for (const MachineOperand &MO : MI.operands()) {
2837  // Predicate register must be explicitly defined.
2838  if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
2839  return false;
2840  if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
2841  return false;
2842  }
2843 
2844  // Hexagon Programmer's Reference says that decbin, memw_locked, and
2845  // memd_locked cannot be used as .new as well,
2846  // but we don't seem to have these instructions defined.
2847  return MI.getOpcode() != Hexagon::A4_tlbmatch;
2848 }
2849 
2850 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
2851  return Opcode == Hexagon::J2_jumpt ||
2852  Opcode == Hexagon::J2_jumptpt ||
2853  Opcode == Hexagon::J2_jumpf ||
2854  Opcode == Hexagon::J2_jumpfpt ||
2855  Opcode == Hexagon::J2_jumptnew ||
2856  Opcode == Hexagon::J2_jumpfnew ||
2857  Opcode == Hexagon::J2_jumptnewpt ||
2858  Opcode == Hexagon::J2_jumpfnewpt;
2859 }
2860 
2862  if (Cond.empty() || !isPredicated(Cond[0].getImm()))
2863  return false;
2864  return !isPredicatedTrue(Cond[0].getImm());
2865 }
2866 
2868  const uint64_t F = MI.getDesc().TSFlags;
2870 }
2871 
2872 // Returns the base register in a memory access (load/store). The offset is
2873 // returned in Offset and the access size is returned in AccessSize.
2874 // If the base register has a subregister or the offset field does not contain
2875 // an immediate value, return 0.
2877  int &Offset, unsigned &AccessSize) const {
2878  // Return if it is not a base+offset type instruction or a MemOp.
2881  !isMemOp(MI) && !isPostIncrement(MI))
2882  return 0;
2883 
2884  AccessSize = getMemAccessSize(MI);
2885 
2886  unsigned BasePos = 0, OffsetPos = 0;
2887  if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
2888  return 0;
2889 
2890  // Post increment updates its EA after the mem access,
2891  // so we need to treat its offset as zero.
2892  if (isPostIncrement(MI)) {
2893  Offset = 0;
2894  } else {
2895  const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
2896  if (!OffsetOp.isImm())
2897  return 0;
2898  Offset = OffsetOp.getImm();
2899  }
2900 
2901  const MachineOperand &BaseOp = MI.getOperand(BasePos);
2902  if (BaseOp.getSubReg() != 0)
2903  return 0;
2904  return BaseOp.getReg();
2905 }
2906 
2907 /// Return the position of the base and offset operands for this instruction.
2909  unsigned &BasePos, unsigned &OffsetPos) const {
2910  // Deal with memops first.
2911  if (isMemOp(MI)) {
2912  BasePos = 0;
2913  OffsetPos = 1;
2914  } else if (MI.mayStore()) {
2915  BasePos = 0;
2916  OffsetPos = 1;
2917  } else if (MI.mayLoad()) {
2918  BasePos = 1;
2919  OffsetPos = 2;
2920  } else
2921  return false;
2922 
2923  if (isPredicated(MI)) {
2924  BasePos++;
2925  OffsetPos++;
2926  }
2927  if (isPostIncrement(MI)) {
2928  BasePos++;
2929  OffsetPos++;
2930  }
2931 
2932  if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
2933  return false;
2934 
2935  return true;
2936 }
2937 
2938 // Inserts branching instructions in reverse order of their occurrence.
2939 // e.g. jump_t t1 (i1)
2940 // jump t2 (i2)
2941 // Jumpers = {i2, i1}
2943  MachineBasicBlock& MBB) const {
2945  // If the block has no terminators, it just falls into the block after it.
2947  if (I == MBB.instr_begin())
2948  return Jumpers;
2949 
2950  // A basic block may looks like this:
2951  //
2952  // [ insn
2953  // EH_LABEL
2954  // insn
2955  // insn
2956  // insn
2957  // EH_LABEL
2958  // insn ]
2959  //
2960  // It has two succs but does not have a terminator
2961  // Don't know how to handle it.
2962  do {
2963  --I;
2964  if (I->isEHLabel())
2965  return Jumpers;
2966  } while (I != MBB.instr_begin());
2967 
2968  I = MBB.instr_end();
2969  --I;
2970 
2971  while (I->isDebugValue()) {
2972  if (I == MBB.instr_begin())
2973  return Jumpers;
2974  --I;
2975  }
2976  if (!isUnpredicatedTerminator(*I))
2977  return Jumpers;
2978 
2979  // Get the last instruction in the block.
2980  MachineInstr *LastInst = &*I;
2981  Jumpers.push_back(LastInst);
2982  MachineInstr *SecondLastInst = nullptr;
2983  // Find one more terminator if present.
2984  do {
2985  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
2986  if (!SecondLastInst) {
2987  SecondLastInst = &*I;
2988  Jumpers.push_back(SecondLastInst);
2989  } else // This is a third branch.
2990  return Jumpers;
2991  }
2992  if (I == MBB.instr_begin())
2993  break;
2994  --I;
2995  } while (true);
2996  return Jumpers;
2997 }
2998 
2999 // Returns Operand Index for the constant extended instruction.
3001  const uint64_t F = MI.getDesc().TSFlags;
3003 }
3004 
3005 // See if instruction could potentially be a duplex candidate.
3006 // If so, return its group. Zero otherwise.
3008  const MachineInstr &MI) const {
3009  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3010 
      // Classify MI by which half of a compound pair it could form:
      // the cases returning HCG_A are compare/transfer candidates, HCG_B
      // are .new conditional jumps, HCG_C are unconditional jumps.
3011  switch (MI.getOpcode()) {
3012  default:
3013  return HexagonII::HCG_None;
3014  //
3015  // Compound pairs.
3016  // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3017  // "Rd16=#U6 ; jump #r9:2"
3018  // "Rd16=Rs16 ; jump #r9:2"
3019  //
3020  case Hexagon::C2_cmpeq:
3021  case Hexagon::C2_cmpgt:
3022  case Hexagon::C2_cmpgtu:
      // Register-register compare: destination must be p0 or p1 and both
      // sources must be sub-instruction-eligible integer registers.
3023  DstReg = MI.getOperand(0).getReg();
3024  Src1Reg = MI.getOperand(1).getReg();
3025  Src2Reg = MI.getOperand(2).getReg();
3026  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3027  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3028  isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3029  return HexagonII::HCG_A;
3030  break;
3031  case Hexagon::C2_cmpeqi:
3032  case Hexagon::C2_cmpgti:
3033  case Hexagon::C2_cmpgtui:
3034  // P0 = cmp.eq(Rs,#u2)
      // The immediate must fit in #u5 or be exactly -1.
3035  DstReg = MI.getOperand(0).getReg();
3036  SrcReg = MI.getOperand(1).getReg();
3037  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3038  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3039  isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3040  ((isUInt<5>(MI.getOperand(2).getImm())) ||
3041  (MI.getOperand(2).getImm() == -1)))
3042  return HexagonII::HCG_A;
3043  break;
3044  case Hexagon::A2_tfr:
3045  // Rd = Rs
3046  DstReg = MI.getOperand(0).getReg();
3047  SrcReg = MI.getOperand(1).getReg();
3048  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3049  return HexagonII::HCG_A;
3050  break;
3051  case Hexagon::A2_tfrsi:
3052  // Rd = #u6
3053  // Do not test for #u6 size since the const is getting extended
3054  // regardless and compound could be formed.
3055  DstReg = MI.getOperand(0).getReg();
3056  if (isIntRegForSubInst(DstReg))
3057  return HexagonII::HCG_A;
3058  break;
3059  case Hexagon::S2_tstbit_i:
      // Only a test of bit 0 into p0/p1 qualifies.
3060  DstReg = MI.getOperand(0).getReg();
3061  Src1Reg = MI.getOperand(1).getReg();
3062  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3063  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3064  MI.getOperand(2).isImm() &&
3065  isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3066  return HexagonII::HCG_A;
3067  break;
3068  // The fact that .new form is used pretty much guarantees
3069  // that predicate register will match. Nevertheless,
3070  // there could be some false positives without additional
3071  // checking.
3072  case Hexagon::J2_jumptnew:
3073  case Hexagon::J2_jumpfnew:
3074  case Hexagon::J2_jumptnewpt:
3075  case Hexagon::J2_jumpfnewpt:
3076  Src1Reg = MI.getOperand(0).getReg();
3077  if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3078  (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3079  return HexagonII::HCG_B;
3080  break;
3081  // Transfer and jump:
3082  // Rd=#U6 ; jump #r9:2
3083  // Rd=Rs ; jump #r9:2
3084  // Do not test for jump range here.
3085  case Hexagon::J2_jump:
3086  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3087  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3088  return HexagonII::HCG_C;
3089  }
3090 
3091  return HexagonII::HCG_None;
3092 }
3093 
3094 // Returns -1 when there is no opcode found.
3096  const MachineInstr &GB) const {
3099  if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3100  (GB.getOpcode() != Hexagon::J2_jumptnew))
3101  return -1;
3102  unsigned DestReg = GA.getOperand(0).getReg();
3103  if (!GB.readsRegister(DestReg))
3104  return -1;
3105  if (DestReg == Hexagon::P0)
3106  return Hexagon::J4_cmpeqi_tp0_jump_nt;
3107  if (DestReg == Hexagon::P1)
3108  return Hexagon::J4_cmpeqi_tp1_jump_nt;
3109  return -1;
3110 }
3111 
3112 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3113  enum Hexagon::PredSense inPredSense;
3114  inPredSense = invertPredicate ? Hexagon::PredSense_false :
3115  Hexagon::PredSense_true;
3116  int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3117  if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3118  return CondOpcode;
3119 
3120  llvm_unreachable("Unexpected predicable instruction");
3121 }
3122 
3123 // Return the cur value instruction for a given store.
3125  switch (MI.getOpcode()) {
      // Only the HVX vector loads listed below have a .cur (current-value)
      // variant; anything else reaching here is a caller error.
3126  default: llvm_unreachable("Unknown .cur type");
3127  case Hexagon::V6_vL32b_pi:
3128  return Hexagon::V6_vL32b_cur_pi;
3129  case Hexagon::V6_vL32b_ai:
3130  return Hexagon::V6_vL32b_cur_ai;
3131  case Hexagon::V6_vL32b_nt_pi:
3132  return Hexagon::V6_vL32b_nt_cur_pi;
3133  case Hexagon::V6_vL32b_nt_ai:
3134  return Hexagon::V6_vL32b_nt_cur_ai;
3135  }
      // Unreachable: every switch path above returns or asserts.
3136  return 0;
3137 }
3138 
3139 // Return the regular version of the .cur instruction.
3141  switch (MI.getOpcode()) {
      // Inverse of the .cur mapping: strip the .cur marker from the HVX
      // loads listed below; any other opcode is a caller error.
3142  default: llvm_unreachable("Unknown .cur type");
3143  case Hexagon::V6_vL32b_cur_pi:
3144  return Hexagon::V6_vL32b_pi;
3145  case Hexagon::V6_vL32b_cur_ai:
3146  return Hexagon::V6_vL32b_ai;
3147  case Hexagon::V6_vL32b_nt_cur_pi:
3148  return Hexagon::V6_vL32b_nt_pi;
3149  case Hexagon::V6_vL32b_nt_cur_ai:
3150  return Hexagon::V6_vL32b_nt_ai;
3151  }
      // Unreachable: every switch path above returns or asserts.
3152  return 0;
3153 }
3154 
3155 // The diagram below shows the steps involved in the conversion of a predicated
3156 // store instruction to its .new predicated new-value form.
3157 //
3158 // Note: It doesn't include conditional new-value stores as they can't be
3159 // converted to .new predicate.
3160 //
3161 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3162 // ^ ^
3163 // / \ (not OK. it will cause new-value store to be
3164 // / X conditional on p0.new while R2 producer is
3165 // / \ on p0)
3166 // / \.
3167 // p.new store p.old NV store
3168 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
3169 // ^ ^
3170 // \ /
3171 // \ /
3172 // \ /
3173 // p.old store
3174 // [if (p0)memw(R0+#0)=R2]
3175 //
3176 // The following set of instructions further explains the scenario where
3177 // conditional new-value store becomes invalid when promoted to .new predicate
3178 // form.
3179 //
3180 // { 1) if (p0) r0 = add(r1, r2)
3181 // 2) p0 = cmp.eq(r3, #0) }
3182 //
3183 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3184 // the first two instructions because in instr 1, r0 is conditional on old value
3185 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3186 // is not valid for new-value stores.
3187 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3188 // from the "Conditional Store" list. Because a predicated new value store
3189 // would NOT be promoted to a double dot new store. See diagram below:
3190 // This function returns yes for those stores that are predicated but not
3191 // yet promoted to predicate dot new instructions.
3192 //
3193 // +---------------------+
3194 // /-----| if (p0) memw(..)=r0 |---------\~
3195 // || +---------------------+ ||
3196 // promote || /\ /\ || promote
3197 // || /||\ /||\ ||
3198 // \||/ demote || \||/
3199 // \/ || || \/
3200 // +-------------------------+ || +-------------------------+
3201 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3202 // +-------------------------+ || +-------------------------+
3203 // || || ||
3204 // || demote \||/
3205 // promote || \/ NOT possible
3206 // || || /\~
3207 // \||/ || /||\~
3208 // \/ || ||
3209 // +-----------------------------+
3210 // | if (p0.new) memw(..)=r0.new |
3211 // +-----------------------------+
3212 // Double Dot New Store
3213 //
3214 // Returns the most basic instruction for the .new predicated instructions and
3215 // new-value stores.
3216 // For example, all of the following instructions will be converted back to the
3217 // same instruction:
3218 // 1) if (p0.new) memw(R0+#0) = R1.new --->
3219 // 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3220 // 3) if (p0.new) memw(R0+#0) = R1 --->
3221 //
3222 // To understand the translation of instruction 1 to its original form, consider
3223 // a packet with 3 instructions.
3224 // { p0 = cmp.eq(R0,R1)
3225 // if (p0.new) R2 = add(R3, R4)
3226 // R5 = add (R3, R1)
3227 // }
3228 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3229 //
3230 // This instruction can be part of the previous packet only if both p0 and R2
3231 // are promoted to .new values. This promotion happens in steps, first
3232 // predicate register is promoted to .new and in the next iteration R2 is
3233 // promoted. Therefore, in case of dependence check failure (due to R5) during
3234 // next iteration, it should be converted back to its most basic form.
3235 
3236 // Return the new value instruction for a given store.
      // First consult the generated new-value opcode table.
3238  int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3239  if (NVOpcode >= 0) // Valid new-value store instruction.
3240  return NVOpcode;
3241 
      // Fall back to hand-maintained mappings not covered by the table.
3242  switch (MI.getOpcode()) {
3243  default:
3244  report_fatal_error(std::string("Unknown .new type: ") +
3245  std::to_string(MI.getOpcode()));
3246  case Hexagon::S4_storerb_ur:
3247  return Hexagon::S4_storerbnew_ur;
3248 
      // NOTE(review): the circular-addressing (_pci) stores below map to
      // themselves rather than to a .new form — confirm this is intended.
3249  case Hexagon::S2_storerb_pci:
3250  return Hexagon::S2_storerb_pci;
3251 
3252  case Hexagon::S2_storeri_pci:
3253  return Hexagon::S2_storeri_pci;
3254 
3255  case Hexagon::S2_storerh_pci:
3256  return Hexagon::S2_storerh_pci;
3257 
3258  case Hexagon::S2_storerd_pci:
3259  return Hexagon::S2_storerd_pci;
3260 
3261  case Hexagon::S2_storerf_pci:
3262  return Hexagon::S2_storerf_pci;
3263 
3264  case Hexagon::V6_vS32b_ai:
3265  return Hexagon::V6_vS32b_new_ai;
3266 
3267  case Hexagon::V6_vS32b_pi:
3268  return Hexagon::V6_vS32b_new_pi;
3269  }
      // Unreachable: every switch path above returns or aborts.
3270  return 0;
3271 }
3272 
3273 // Returns the opcode to use when converting MI, which is a conditional jump,
3274 // into a conditional instruction which uses the .new value of the predicate.
3275 // We also use branch probabilities to add a hint to the jump.
3276 // If MBPI is null, all edges will be treated as equally likely for the
3277 // purposes of establishing a predication hint.
3279  const MachineBranchProbabilityInfo *MBPI) const {
3280  // We assume that block can have at most two successors.
3281  const MachineBasicBlock *Src = MI.getParent();
3282  const MachineOperand &BrTarget = MI.getOperand(1);
3283  bool Taken = false;
3284  const BranchProbability OneHalf(1, 2);
3285 
3286  auto getEdgeProbability = [MBPI] (const MachineBasicBlock *Src,
3287  const MachineBasicBlock *Dst) {
3288  if (MBPI)
3289  return MBPI->getEdgeProbability(Src, Dst);
3290  return BranchProbability(1, Src->succ_size());
3291  };
3292 
3293  if (BrTarget.isMBB()) {
3294  const MachineBasicBlock *Dst = BrTarget.getMBB();
3295  Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3296  } else {
3297  // The branch target is not a basic block (most likely a function).
3298  // Since BPI only gives probabilities for targets that are basic blocks,
3299  // try to identify another target of this branch (potentially a fall-
3300  // -through) and check the probability of that target.
3301  //
3302  // The only handled branch combinations are:
3303  // - one conditional branch,
3304  // - one conditional branch followed by one unconditional branch.
3305  // Otherwise, assume not-taken.
3307  const MachineBasicBlock &B = *MI.getParent();
3308  bool SawCond = false, Bad = false;
3309  for (const MachineInstr &I : B) {
3310  if (!I.isBranch())
3311  continue;
3312  if (I.isConditionalBranch()) {
3313  SawCond = true;
3314  if (&I != &MI) {
3315  Bad = true;
3316  break;
3317  }
3318  }
3319  if (I.isUnconditionalBranch() && !SawCond) {
3320  Bad = true;
3321  break;
3322  }
3323  }
3324  if (!Bad) {
3326  MachineBasicBlock::const_instr_iterator NextIt = std::next(It);
3327  if (NextIt == B.instr_end()) {
3328  // If this branch is the last, look for the fall-through block.
3329  for (const MachineBasicBlock *SB : B.successors()) {
3330  if (!B.isLayoutSuccessor(SB))
3331  continue;
3332  Taken = getEdgeProbability(Src, SB) < OneHalf;
3333  break;
3334  }
3335  } else {
3336  assert(NextIt->isUnconditionalBranch());
3337  // Find the first MBB operand and assume it's the target.
3338  const MachineBasicBlock *BT = nullptr;
3339  for (const MachineOperand &Op : NextIt->operands()) {
3340  if (!Op.isMBB())
3341  continue;
3342  BT = Op.getMBB();
3343  break;
3344  }
3345  Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3346  }
3347  } // if (!Bad)
3348  }
3349 
3350  // The Taken flag should be set to something reasonable by this point.
3351 
3352  switch (MI.getOpcode()) {
3353  case Hexagon::J2_jumpt:
3354  return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3355  case Hexagon::J2_jumpf:
3356  return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3357 
3358  default:
3359  llvm_unreachable("Unexpected jump instruction.");
3360  }
3361 }
3362 
3363 // Return .new predicate version for an instruction.
3365  const MachineBranchProbabilityInfo *MBPI) const {
      // Conditional jumps get their .new form (plus a branch-prediction
      // hint) from getDotNewPredJumpOp.
3366  switch (MI.getOpcode()) {
3367  // Conditional jumps.
3368  case Hexagon::J2_jumpt:
3369  case Hexagon::J2_jumpf:
3370  return getDotNewPredJumpOp(MI, MBPI);
3371  }
3372 
      // Everything else is looked up in the generated predicate-new table;
      // returning 0 signals that no .new form exists.
3373  int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3374  if (NewOpcode >= 0)
3375  return NewOpcode;
3376  return 0;
3377 }
3378 
3380  int NewOp = MI.getOpcode();
3381  if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3382  NewOp = Hexagon::getPredOldOpcode(NewOp);
3383  // All Hexagon architectures have prediction bits on dot-new branches,
3384  // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
3385  // to pick the right opcode when converting back to dot-old.
3386  if (!Subtarget.getFeatureBits()[Hexagon::ArchV60]) {
3387  switch (NewOp) {
3388  case Hexagon::J2_jumptpt:
3389  NewOp = Hexagon::J2_jumpt;
3390  break;
3391  case Hexagon::J2_jumpfpt:
3392  NewOp = Hexagon::J2_jumpf;
3393  break;
3394  case Hexagon::J2_jumprtpt:
3395  NewOp = Hexagon::J2_jumprt;
3396  break;
3397  case Hexagon::J2_jumprfpt:
3398  NewOp = Hexagon::J2_jumprf;
3399  break;
3400  }
3401  }
3402  assert(NewOp >= 0 &&
3403  "Couldn't change predicate new instruction to its old form.");
3404  }
3405 
      // If the result is still a new-value store, demote it as well.
3406  if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3407  NewOp = Hexagon::getNonNVStore(NewOp);
3408  assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3409  }
3410 
3411  if (Subtarget.hasV60TOps())
3412  return NewOp;
3413 
3414  // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps.
      // NOTE(review): this pt-demotion repeats the !ArchV60 switch inside the
      // predicated-new path above — confirm both paths are still required.
3415  switch (NewOp) {
3416  case Hexagon::J2_jumpfpt:
3417  return Hexagon::J2_jumpf;
3418  case Hexagon::J2_jumptpt:
3419  return Hexagon::J2_jumpt;
3420  case Hexagon::J2_jumprfpt:
3421  return Hexagon::J2_jumprf;
3422  case Hexagon::J2_jumprtpt:
3423  return Hexagon::J2_jumprt;
3424  }
3425  return NewOp;
3426 }
3427 
3428 // See if instruction could potentially be a duplex candidate.
3429 // If so, return its group. Zero otherwise.
3431  const MachineInstr &MI) const {
3432  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3433  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3434 
3435  switch (MI.getOpcode()) {
3436  default:
3437  return HexagonII::HSIG_None;
3438  //
3439  // Group L1:
3440  //
3441  // Rd = memw(Rs+#u4:2)
3442  // Rd = memub(Rs+#u4:0)
3443  case Hexagon::L2_loadri_io:
3444  DstReg = MI.getOperand(0).getReg();
3445  SrcReg = MI.getOperand(1).getReg();
3446  // Special case this one from Group L2.
3447  // Rd = memw(r29+#u5:2)
3448  if (isIntRegForSubInst(DstReg)) {
3449  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3450  HRI.getStackRegister() == SrcReg &&
3451  MI.getOperand(2).isImm() &&
3452  isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3453  return HexagonII::HSIG_L2;
3454  // Rd = memw(Rs+#u4:2)
3455  if (isIntRegForSubInst(SrcReg) &&
3456  (MI.getOperand(2).isImm() &&
3457  isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3458  return HexagonII::HSIG_L1;
3459  }
3460  break;
3461  case Hexagon::L2_loadrub_io:
3462  // Rd = memub(Rs+#u4:0)
3463  DstReg = MI.getOperand(0).getReg();
3464  SrcReg = MI.getOperand(1).getReg();
3465  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3466  MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3467  return HexagonII::HSIG_L1;
3468  break;
3469  //
3470  // Group L2:
3471  //
3472  // Rd = memh/memuh(Rs+#u3:1)
3473  // Rd = memb(Rs+#u3:0)
3474  // Rd = memw(r29+#u5:2) - Handled above.
3475  // Rdd = memd(r29+#u5:3)
3476  // deallocframe
3477  // [if ([!]p0[.new])] dealloc_return
3478  // [if ([!]p0[.new])] jumpr r31
3479  case Hexagon::L2_loadrh_io:
3480  case Hexagon::L2_loadruh_io:
3481  // Rd = memh/memuh(Rs+#u3:1)
3482  DstReg = MI.getOperand(0).getReg();
3483  SrcReg = MI.getOperand(1).getReg();
3484  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3485  MI.getOperand(2).isImm() &&
3486  isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3487  return HexagonII::HSIG_L2;
3488  break;
3489  case Hexagon::L2_loadrb_io:
3490  // Rd = memb(Rs+#u3:0)
3491  DstReg = MI.getOperand(0).getReg();
3492  SrcReg = MI.getOperand(1).getReg();
3493  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3494  MI.getOperand(2).isImm() &&
3495  isUInt<3>(MI.getOperand(2).getImm()))
3496  return HexagonII::HSIG_L2;
3497  break;
3498  case Hexagon::L2_loadrd_io:
3499  // Rdd = memd(r29+#u5:3)
3500  DstReg = MI.getOperand(0).getReg();
3501  SrcReg = MI.getOperand(1).getReg();
3502  if (isDblRegForSubInst(DstReg, HRI) &&
3503  Hexagon::IntRegsRegClass.contains(SrcReg) &&
3504  HRI.getStackRegister() == SrcReg &&
3505  MI.getOperand(2).isImm() &&
3506  isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3507  return HexagonII::HSIG_L2;
3508  break;
3509  // dealloc_return is not documented in Hexagon Manual, but marked
3510  // with A_SUBINSN attribute in iset_v4classic.py.
3511  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3512  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3513  case Hexagon::L4_return:
3514  case Hexagon::L2_deallocframe:
3515  return HexagonII::HSIG_L2;
3516  case Hexagon::EH_RETURN_JMPR:
3517  case Hexagon::PS_jmpret:
3518  // jumpr r31
3519  // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
3520  DstReg = MI.getOperand(0).getReg();
3521  if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3522  return HexagonII::HSIG_L2;
3523  break;
3524  case Hexagon::PS_jmprett:
3525  case Hexagon::PS_jmpretf:
3526  case Hexagon::PS_jmprettnewpt:
3527  case Hexagon::PS_jmpretfnewpt:
3528  case Hexagon::PS_jmprettnew:
3529  case Hexagon::PS_jmpretfnew:
3530  DstReg = MI.getOperand(1).getReg();
3531  SrcReg = MI.getOperand(0).getReg();
3532  // [if ([!]p0[.new])] jumpr r31
3533  if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
3534  (Hexagon::P0 == SrcReg)) &&
3535  (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
3536  return HexagonII::HSIG_L2;
3537  break;
3538  case Hexagon::L4_return_t:
3539  case Hexagon::L4_return_f:
3540  case Hexagon::L4_return_tnew_pnt:
3541  case Hexagon::L4_return_fnew_pnt:
3542  case Hexagon::L4_return_tnew_pt:
3543  case Hexagon::L4_return_fnew_pt:
3544  // [if ([!]p0[.new])] dealloc_return
3545  SrcReg = MI.getOperand(0).getReg();
3546  if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
3547  return HexagonII::HSIG_L2;
3548  break;
3549  //
3550  // Group S1:
3551  //
3552  // memw(Rs+#u4:2) = Rt
3553  // memb(Rs+#u4:0) = Rt
3554  case Hexagon::S2_storeri_io:
3555  // Special case this one from Group S2.
3556  // memw(r29+#u5:2) = Rt
3557  Src1Reg = MI.getOperand(0).getReg();
3558  Src2Reg = MI.getOperand(2).getReg();
3559  if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3560  isIntRegForSubInst(Src2Reg) &&
3561  HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
3562  isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
3563  return HexagonII::HSIG_S2;
3564  // memw(Rs+#u4:2) = Rt
3565  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3566  MI.getOperand(1).isImm() &&
3567  isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
3568  return HexagonII::HSIG_S1;
3569  break;
3570  case Hexagon::S2_storerb_io:
3571  // memb(Rs+#u4:0) = Rt
3572  Src1Reg = MI.getOperand(0).getReg();
3573  Src2Reg = MI.getOperand(2).getReg();
3574  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3575  MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
3576  return HexagonII::HSIG_S1;
3577  break;
3578  //
3579  // Group S2:
3580  //
3581  // memh(Rs+#u3:1) = Rt
3582  // memw(r29+#u5:2) = Rt
3583  // memd(r29+#s6:3) = Rtt
3584  // memw(Rs+#u4:2) = #U1
3585  // memb(Rs+#u4) = #U1
3586  // allocframe(#u5:3)
3587  case Hexagon::S2_storerh_io:
3588  // memh(Rs+#u3:1) = Rt
3589  Src1Reg = MI.getOperand(0).getReg();
3590  Src2Reg = MI.getOperand(2).getReg();
3591  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3592  MI.getOperand(1).isImm() &&
3593  isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
3594  return HexagonII::HSIG_S1;
3595  break;
3596  case Hexagon::S2_storerd_io:
3597  // memd(r29+#s6:3) = Rtt
3598  Src1Reg = MI.getOperand(0).getReg();
3599  Src2Reg = MI.getOperand(2).getReg();
3600  if (isDblRegForSubInst(Src2Reg, HRI) &&
3601  Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3602  HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
3603  isShiftedInt<6,3>(MI.getOperand(1).getImm()))
3604  return HexagonII::HSIG_S2;
3605  break;
3606  case Hexagon::S4_storeiri_io:
3607  // memw(Rs+#u4:2) = #U1
3608  Src1Reg = MI.getOperand(0).getReg();
3609  if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
3610  isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
3611  MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
3612  return HexagonII::HSIG_S2;
3613  break;
3614  case Hexagon::S4_storeirb_io:
3615  // memb(Rs+#u4) = #U1
3616  Src1Reg = MI.getOperand(0).getReg();
3617  if (isIntRegForSubInst(Src1Reg) &&
3618  MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
3619  MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
3620  return HexagonII::HSIG_S2;
3621  break;
3622  case Hexagon::S2_allocframe:
3623  if (MI.getOperand(0).isImm() &&
3624  isShiftedUInt<5,3>(MI.getOperand(0).getImm()))
3625  return HexagonII::HSIG_S1;
3626  break;
3627  //
3628  // Group A:
3629  //
3630  // Rx = add(Rx,#s7)
3631  // Rd = Rs
3632  // Rd = #u6
3633  // Rd = #-1
3634  // if ([!]P0[.new]) Rd = #0
3635  // Rd = add(r29,#u6:2)
3636  // Rx = add(Rx,Rs)
3637  // P0 = cmp.eq(Rs,#u2)
3638  // Rdd = combine(#0,Rs)
3639  // Rdd = combine(Rs,#0)
3640  // Rdd = combine(#u2,#U2)
3641  // Rd = add(Rs,#1)
3642  // Rd = add(Rs,#-1)
3643  // Rd = sxth/sxtb/zxtb/zxth(Rs)
3644  // Rd = and(Rs,#1)
3645  case Hexagon::A2_addi:
3646  DstReg = MI.getOperand(0).getReg();
3647  SrcReg = MI.getOperand(1).getReg();
3648  if (isIntRegForSubInst(DstReg)) {
3649  // Rd = add(r29,#u6:2)
3650  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3651  HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
3652  isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
3653  return HexagonII::HSIG_A;
3654  // Rx = add(Rx,#s7)
3655  if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
3656  isInt<7>(MI.getOperand(2).getImm()))
3657  return HexagonII::HSIG_A;
3658  // Rd = add(Rs,#1)
3659  // Rd = add(Rs,#-1)
3660  if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3661  ((MI.getOperand(2).getImm() == 1) ||
3662  (MI.getOperand(2).getImm() == -1)))
3663  return HexagonII::HSIG_A;
3664  }
3665  break;
3666  case Hexagon::A2_add:
3667  // Rx = add(Rx,Rs)
3668  DstReg = MI.getOperand(0).getReg();
3669  Src1Reg = MI.getOperand(1).getReg();
3670  Src2Reg = MI.getOperand(2).getReg();
3671  if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
3672  isIntRegForSubInst(Src2Reg))
3673  return HexagonII::HSIG_A;
3674  break;
3675  case Hexagon::A2_andir:
3676  // Same as zxtb.
3677  // Rd16=and(Rs16,#255)
3678  // Rd16=and(Rs16,#1)
3679  DstReg = MI.getOperand(0).getReg();
3680  SrcReg = MI.getOperand(1).getReg();
3681  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3682  MI.getOperand(2).isImm() &&
3683  ((MI.getOperand(2).getImm() == 1) ||
3684  (MI.getOperand(2).getImm() == 255)))
3685  return HexagonII::HSIG_A;
3686  break;
3687  case Hexagon::A2_tfr:
3688  // Rd = Rs
3689  DstReg = MI.getOperand(0).getReg();
3690  SrcReg = MI.getOperand(1).getReg();
3691  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3692  return HexagonII::HSIG_A;
3693  break;
3694  case Hexagon::A2_tfrsi:
3695  // Rd = #u6
3696  // Do not test for #u6 size since the const is getting extended
3697  // regardless and compound could be formed.
3698  // Rd = #-1
3699  DstReg = MI.getOperand(0).getReg();
3700  if (isIntRegForSubInst(DstReg))
3701  return HexagonII::HSIG_A;
3702  break;
3703  case Hexagon::C2_cmoveit:
3704  case Hexagon::C2_cmovenewit:
3705  case Hexagon::C2_cmoveif:
3706  case Hexagon::C2_cmovenewif:
3707  // if ([!]P0[.new]) Rd = #0
3708  // Actual form:
3709  // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
3710  DstReg = MI.getOperand(0).getReg();
3711  SrcReg = MI.getOperand(1).getReg();
3712  if (isIntRegForSubInst(DstReg) &&
3713  Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
3714  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
3715  return HexagonII::HSIG_A;
3716  break;
3717  case Hexagon::C2_cmpeqi:
3718  // P0 = cmp.eq(Rs,#u2)
3719  DstReg = MI.getOperand(0).getReg();
3720  SrcReg = MI.getOperand(1).getReg();
3721  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3722  Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
3723  MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
3724  return HexagonII::HSIG_A;
3725  break;
3726  case Hexagon::A2_combineii:
3727  case Hexagon::A4_combineii:
3728  // Rdd = combine(#u2,#U2)
3729  DstReg = MI.getOperand(0).getReg();
3730  if (isDblRegForSubInst(DstReg, HRI) &&
3731  ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
3732  (MI.getOperand(1).isGlobal() &&
3733  isUInt<2>(MI.getOperand(1).getOffset()))) &&
3734  ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
3735  (MI.getOperand(2).isGlobal() &&
3736  isUInt<2>(MI.getOperand(2).getOffset()))))
3737  return HexagonII::HSIG_A;
3738  break;
3739  case Hexagon::A4_combineri:
3740  // Rdd = combine(Rs,#0)
3741  DstReg = MI.getOperand(0).getReg();
3742  SrcReg = MI.getOperand(1).getReg();
3743  if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3744  ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
3745  (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
3746  return HexagonII::HSIG_A;
3747  break;
3748  case Hexagon::A4_combineir:
3749  // Rdd = combine(#0,Rs)
3750  DstReg = MI.getOperand(0).getReg();
3751  SrcReg = MI.getOperand(2).getReg();
3752  if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3753  ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
3754  (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
3755  return HexagonII::HSIG_A;
3756  break;
3757  case Hexagon::A2_sxtb:
3758  case Hexagon::A2_sxth:
3759  case Hexagon::A2_zxtb:
3760  case Hexagon::A2_zxth:
3761  // Rd = sxth/sxtb/zxtb/zxth(Rs)
3762  DstReg = MI.getOperand(0).getReg();
3763  SrcReg = MI.getOperand(1).getReg();
3764  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3765  return HexagonII::HSIG_A;
3766  break;
3767  }
3768 
3769  return HexagonII::HSIG_None;
3770 }
3771 
3773  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
3774 }
3775 
3777  const InstrItineraryData *ItinData, const MachineInstr &MI) const {
3778  // Default to one cycle for no itinerary. However, an "empty" itinerary may
3779  // still have a MinLatency property, which getStageLatency checks.
3780  if (!ItinData)
3781  return getInstrLatency(ItinData, MI);
3782 
      // Transient instructions (pseudos that emit no real code) take no
      // cycles.
3783  if (MI.isTransient())
3784  return 0;
      // Otherwise look up the stage latency of the instruction's sched class.
3785  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
3786 }
3787 
3788 /// getOperandLatency - Compute and return the use operand latency of a given
3789 /// pair of def and use.
3790 /// In most cases, the static scheduling itinerary was enough to determine the
3791 /// operand latency. But it may not be possible for instructions with variable
3792 /// number of defs / uses.
3793 ///
3794 /// This is a raw interface to the itinerary that may be directly overriden by
3795 /// a target. Use computeOperandLatency to get the best estimate of latency.
3797  const MachineInstr &DefMI,
3798  unsigned DefIdx,
3799  const MachineInstr &UseMI,
3800  unsigned UseIdx) const {
3801  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3802 
3803  // Get DefIdx and UseIdx for super registers.
3804  MachineOperand DefMO = DefMI.getOperand(DefIdx);
3805 
3806  if (HRI.isPhysicalRegister(DefMO.getReg())) {
3807  if (DefMO.isImplicit()) {
      // An implicit def of a physical register may actually be modeled as an
      // explicit def of one of its super-registers; redirect DefIdx to the
      // first such operand found.
3808  for (MCSuperRegIterator SR(DefMO.getReg(), &HRI); SR.isValid(); ++SR) {
3809  int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &HRI);
3810  if (Idx != -1) {
3811  DefIdx = Idx;
3812  break;
3813  }
3814  }
3815  }
3816 
3817  MachineOperand UseMO = UseMI.getOperand(UseIdx);
3818  if (UseMO.isImplicit()) {
      // Same redirection for an implicit use operand.
3819  for (MCSuperRegIterator SR(UseMO.getReg(), &HRI); SR.isValid(); ++SR) {
3820  int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &HRI);
3821  if (Idx != -1) {
3822  UseIdx = Idx;
3823  break;
3824  }
3825  }
3826  }
3827  }
3828 
      // Delegate to the itinerary-based default with the (possibly adjusted)
      // operand indices.
3829  return TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
3830  UseMI, UseIdx);
3831 }
3832 
3833 // inverts the predication logic.
3834 // p -> NotP
3835 // NotP -> P
3837  SmallVectorImpl<MachineOperand> &Cond) const {
      // Cond[0] carries the predicated opcode as an immediate; replace it
      // with the variant of the opposite predicate sense. Returns false when
      // there is no condition to invert.
3838  if (Cond.empty())
3839  return false;
3840  unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
3841  Cond[0].setImm(Opc);
3842  return true;
3843 }
3844 
3845 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
3846  int InvPredOpcode;
3847  InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
3848  : Hexagon::getTruePredOpcode(Opc);
3849  if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
3850  return InvPredOpcode;
3851 
3852  llvm_unreachable("Unexpected predicated instruction");
3853 }
3854 
3855 // Returns the max value that doesn't need to be extended.
3857  const uint64_t F = MI.getDesc().TSFlags;
3858  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3860  unsigned bits = (F >> HexagonII::ExtentBitsPos)
3862 
3863  if (isSigned) // if value is signed
3864  return ~(-1U << (bits - 1));
3865  else
3866  return ~(-1U << bits);
3867 }
3868 
3870  using namespace HexagonII;
3871 
3872  const uint64_t F = MI.getDesc().TSFlags;
3873  unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
3875  if (Size != 0)
3876  return Size;
3877 
3878  // Handle vector access sizes.
3879  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3880  switch (S) {
3882  return HRI.getSpillSize(Hexagon::HvxVRRegClass);
3883  default:
3884  llvm_unreachable("Unexpected instruction");
3885  }
3886 }
3887 
3888 // Returns the min value that doesn't need to be extended.
3890  const uint64_t F = MI.getDesc().TSFlags;
3891  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3893  unsigned bits = (F >> HexagonII::ExtentBitsPos)
3895 
3896  if (isSigned) // if value is signed
3897  return -1U << (bits - 1);
3898  else
3899  return 0;
3900 }
3901 
// Returns opcode of the non-extended equivalent instruction.
// Returns -1 when no non-extended equivalent exists.
// NOTE(review): the extraction dropped the signature
// (short getNonExtOpcode(const MachineInstr &MI) const, per the member index)
// and two case labels inside the switch (the BaseImmOffset/BaseLongOffset
// addressing-mode cases, judging by the changeAddrMode_* calls) — verify
// against upstream.
  // Check if the instruction has a register form that uses register in place
  // of the extended operand, if so return that as the non-extended form.
  short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
  if (NonExtOpcode >= 0)
    return NonExtOpcode;

  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
    // Check addressing mode and retrieve non-ext equivalent instruction.
    switch (getAddrMode(MI)) {
    case HexagonII::Absolute:
      return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
      return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
      return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());

    default:
      return -1;
    }
  }
  return -1;
}
3926 
// Extract the predicate register (plus its operand position and register-state
// flags) from a branch condition vector. Returns false for empty conditions
// and for new-value jumps / endloop conditions, which carry no predicate
// register.
// NOTE(review): the first signature line was dropped by the extraction; this
// is the tail of getPredReg(ArrayRef<MachineOperand> Cond, ...) — verify
// against upstream.
    unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
  if (Cond.empty())
    return false;
  assert(Cond.size() == 2);
  if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
    DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
    return false;
  }
  // Cond is (opcode-imm, predicate-reg); the register lives at index 1.
  PredReg = Cond[1].getReg();
  PredRegPos = 1;
  // See IfConversion.cpp why we add RegState::Implicit | RegState::Undef
  PredRegFlags = 0;
  if (Cond[1].isImplicit())
    PredRegFlags = RegState::Implicit;
  if (Cond[1].isUndef())
    PredRegFlags |= RegState::Undef;
  return true;
}
3946 
// Look up the pseudo-instruction pairing for MI's opcode via the generated
// real-HW-instruction mapping table.
// NOTE(review): the signature line was dropped by the extraction
// (short getPseudoInstrPair(const MachineInstr &MI) const, per the member
// index) — verify against upstream.
  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
}
3950 
// Return the register-form opcode for MI's opcode from the generated mapping
// table (negative when no register form exists).
// NOTE(review): the signature line was dropped by the extraction
// (short getRegForm(const MachineInstr &MI) const, per the member index) —
// verify against upstream.
  return Hexagon::getRegForm(MI.getOpcode());
}
3954 
// Return the number of bytes required to encode the instruction.
// Hexagon instructions are fixed length, 4 bytes, unless they
// use a constant extender, which requires another 4 bytes.
// For debug instructions and prolog labels, return 0.
// NOTE(review): the extraction dropped the function signature and the guard
// line opening the inline-asm branch below (presumably
// `if (MI.isInlineAsm()) {`) — verify against upstream.
  if (MI.isDebugValue() || MI.isPosition())
    return 0;

  unsigned Size = MI.getDesc().getSize();
  if (!Size)
    // Assume the default insn size in case it cannot be determined
    // for whatever reason.
    Size = HEXAGON_INSTR_SIZE;

  // A constant extender occupies one extra instruction word.
  if (isConstExtended(MI) || isExtended(MI))
    Size += HEXAGON_INSTR_SIZE;

  // Try and compute number of instructions in asm.
    const MachineBasicBlock &MBB = *MI.getParent();
    const MachineFunction *MF = MBB.getParent();
    const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

    // Count the number of register definitions to find the asm string.
    unsigned NumDefs = 0;
    for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
         ++NumDefs)
      assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");

    assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
    // Disassemble the AsmStr and approximate number of instructions.
    const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
    Size = getInlineAsmLength(AsmStr, *MAI);
  }

  return Size;
}
3992 
// Return the instruction type field decoded from MI's TSFlags.
// NOTE(review): the signature line was dropped by the extraction
// (uint64_t getType(const MachineInstr &MI) const, per the member index) —
// verify against upstream.
  const uint64_t F = MI.getDesc().TSFlags;
  return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
}
3997 
// Return the functional-unit choice mask for MI, taken from the first stage
// of its scheduling-class itinerary.
// NOTE(review): the signature line was dropped by the extraction
// (unsigned getUnits(const MachineInstr &MI) const, per the member index) —
// verify against upstream.
  const InstrItineraryData &II = *Subtarget.getInstrItineraryData();
  const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());

  return IS.getUnits();
}
4004 
// Calculate size of the basic block without debug instructions.
// NOTE(review): the signature line was dropped by the extraction
// (unsigned nonDbgBBSize(const MachineBasicBlock *BB) const, per the member
// index) — verify against upstream.
  return nonDbgMICount(BB->instr_begin(), BB->instr_end());
}
4009 
// Count the non-debug instructions inside a bundle, excluding the bundle
// header itself.
// NOTE(review): the first signature line was dropped by the extraction; this
// is the tail of nonDbgBundleSize(MachineBasicBlock::const_iterator) —
// verify against upstream.
    MachineBasicBlock::const_iterator BundleHead) const {
  assert(BundleHead->isBundle() && "Not a bundle header");
  auto MII = BundleHead.getInstrIterator();
  // Skip the bundle header.
  return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
}
4017 
/// immediateExtend - Changes the instruction in place to one using an immediate
/// extender.
// NOTE(review): the extraction dropped the function signature and the final
// statement that actually marks the operand as extended — verify against
// upstream.
  assert((isExtendable(MI)||isConstExtended(MI)) &&
         "Instruction must be extendable");
  // Find which operand is extendable.
  short ExtOpNum = getCExtOpNum(MI);
  MachineOperand &MO = MI.getOperand(ExtOpNum);
  // This needs to be something we understand.
  assert((MO.isMBB() || MO.isImm()) &&
         "Branch with unknown extendable field type");
  // Mark given operand as extended.
}
4032 
// Invert the sense of a predicated branch in place and retarget it to
// NewTarget. Returns true (always, in the visible code).
// NOTE(review): the extraction dropped the first signature line (this is the
// tail of invertAndChangeJumpTarget(MachineInstr &, MachineBasicBlock *)) and
// the guard line that opens the prediction-reversal block below — verify
// against upstream.
    MachineInstr &MI, MachineBasicBlock *NewTarget) const {
  DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#"
               << NewTarget->getNumber(); MI.dump(););
  assert(MI.isBranch());
  unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
  int TargetPos = MI.getNumOperands() - 1;
  // In general branch target is the last operand,
  // but some implicit defs added at the end might change it.
  while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
    --TargetPos;
  assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
  MI.getOperand(TargetPos).setMBB(NewTarget);
    NewOpcode = reversePrediction(NewOpcode);
  }
  MI.setDesc(get(NewOpcode));
  return true;
}
4052 
// Debug/bring-up helper: materialize every Hexagon opcode once (then erase
// it) so each instruction's scheduling class can be printed.
// NOTE(review): the extraction dropped the function signature
// (void genAllInsnTimingClasses(MachineFunction &MF) const, per the member
// index) and the two setup lines that initialize `A` (a block iterator) and
// `I` (an insertion point) — verify against upstream.
  /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
  MachineBasicBlock &B = *A;
  DebugLoc DL = I->getDebugLoc();
  MachineInstr *NewMI;

  // Walk every target opcode past the generic range and build a throwaway
  // instruction for it, logging its name and scheduling class.
  for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
       insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
    NewMI = BuildMI(B, I, DL, get(insn));
    DEBUG(dbgs() << "\n" << getName(NewMI->getOpcode()) <<
          " Class: " << NewMI->getDesc().getSchedClass());
    NewMI->eraseFromParent();
  }
  /* --- The code above is used to generate complete set of Hexagon Insn --- */
}
4070 
// inverts the predication logic.
// p -> NotP
// NotP -> P
// NOTE(review): the extraction dropped the function signature
// (bool reversePredSense(MachineInstr &MI) const, per the member index) and
// the statement that installs the inverted opcode (presumably via
// MI.setDesc(get(getInvertedPredicatedOpcode(...)))) — verify against
// upstream.
  DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
  return true;
}
4079 
4080 // Reverse the branch prediction.
4081 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4082  int PredRevOpcode = -1;
4083  if (isPredictedTaken(Opcode))
4084  PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4085  else
4086  PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4087  assert(PredRevOpcode > 0);
4088  return PredRevOpcode;
4089 }
4090 
// TODO: Add more rigorous validation.
// A condition list is considered valid when it is empty (unconditional
// branch) or when its first operand is an immediate (an opcode) accompanied
// by at least one more operand.
// NOTE(review): the signature line was dropped by the extraction
// (bool validateBranchCond(const ArrayRef<MachineOperand> &Cond) const, per
// the member index) — verify against upstream.
    const {
  return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
}
4096 
// Addressing mode relations.
// Each wrapper below maps an opcode from one addressing mode to the
// equivalent opcode in another mode via the generated Hexagon tables,
// passing negative (invalid) opcodes through unchanged.
// NOTE(review): the extraction dropped all six signature lines
// (short changeAddrMode_abs_io/_io_abs/_io_rr/_rr_io/_rr_ur/_ur_rr
// (short Opc) const, per the member index) — only the bodies remain; verify
// against upstream.
  return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
}

  return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
}

  return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
}

  return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
}

  return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
}

  return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
}
static bool isReg(const MCInst &MI, unsigned OpNo)
unsigned getTargetFlags() const
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
short getNonExtOpcode(const MachineInstr &MI) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Return an array that contains the direct target flag values and their names.
bool isVecALU(const MachineInstr &MI) const
void push_back(const T &Elt)
Definition: SmallVector.h:212
const MachineInstrBuilder & add(const MachineOperand &MO) const
bool DefinesPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred) const override
If the specified instruction defines any predicate or condition code register(s) used for predication...
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:843
short changeAddrMode_rr_io(short Opc) const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
Definition: MachineInstr.h:965
instr_iterator instr_begin()
const int Hexagon_MEMH_OFFSET_MAX
bool is_TC2early(unsigned SchedClass)
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:458
unsigned nonDbgBBSize(const MachineBasicBlock *BB) const
getInstrTimingClassLatency - Compute the instruction latency of a given instruction using Timing Clas...
instr_iterator instr_end()
MachineBasicBlock * getMBB() const
const int Hexagon_ADDI_OFFSET_MAX
unsigned getFrameRegister(const MachineFunction &MF) const override
unsigned getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
const int Hexagon_MEMH_OFFSET_MIN
bool available(const MachineRegisterInfo &MRI, unsigned Reg) const
Returns true if register Reg and no aliasing register is in the set.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
const InstrStage * beginStage(unsigned ItinClassIndx) const
Return the first stage of the itinerary.
static void parseOperands(const MachineInstr &MI, SmallVector< unsigned, 4 > &Defs, SmallVector< unsigned, 8 > &Uses)
Gather register def/uses from MI.
static cl::opt< bool > UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("Use the DFA based hazard recognizer."))
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
short getEquivalentHWInstr(const MachineInstr &MI) const
DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &STI) const override
Create machine specific model for scheduling.
bool isAbsoluteSet(const MachineInstr &MI) const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:268
bool isJumpR(const MachineInstr &MI) const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
Returns true if the first specified predicate subsumes the second, e.g.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getUnits() const
Returns the choice of FUs.
bool isConstExtended(const MachineInstr &MI) const
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
bool getInvertedPredSense(SmallVectorImpl< MachineOperand > &Cond) const
Address of indexed Jump Table for switch.
unsigned nonDbgBundleSize(MachineBasicBlock::const_iterator BundleHead) const
unsigned getSubReg() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
bool isInlineAsm() const
Definition: MachineInstr.h:832
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
int getMaxValue(const MachineInstr &MI) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
Return an array that contains the bitmask target flag values and their names.
const int Hexagon_ADDI_OFFSET_MIN
bool reversePredSense(MachineInstr &MI) const
int getDotNewPredOp(const MachineInstr &MI, const MachineBranchProbabilityInfo *MBPI) const
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
Definition: MachineInstr.h:900
demanded bits
MachineBasicBlock reference.
bool isExpr(unsigned OpType) const
bool isTailCall(const MachineInstr &MI) const override
A debug info location.
Definition: DebugLoc.h:34
F(f)
unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr *> &PrevInsts, unsigned Iter, unsigned MaxIter) const override
Generate code to reduce the loop iteration by one and check if the loop is finished.
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool producesStall(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
MachineInstrBundleIterator< const MachineInstr > const_iterator
const HexagonFrameLowering * getFrameLowering() const override
unsigned getMemAccessSize(const MachineInstr &MI) const
const int Hexagon_MEMD_OFFSET_MAX
unsigned getSize(const MachineInstr &MI) const
int getDotCurOp(const MachineInstr &MI) const
bool mayLoad() const
Return true if this instruction could possibly read memory.
Definition: MCInstrDesc.h:387
bool isLateResultInstr(const MachineInstr &MI) const
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:332
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool is_TC1(unsigned SchedClass)
void immediateExtend(MachineInstr &MI) const
immediateExtend - Changes the instruction in place to one using an immediate extender.
int getDotNewPredJumpOp(const MachineInstr &MI, const MachineBranchProbabilityInfo *MBPI) const
short changeAddrMode_ur_rr(short Opc) const
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
return AArch64::GPR64RegClass contains(Reg)
iterator_range< succ_iterator > successors()
bool isToBeScheduledASAP(const MachineInstr &MI1, const MachineInstr &MI2) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
Insert a noop into the instruction stream at the specified point.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e...
bool isHVXVec(const MachineInstr &MI) const
static cl::opt< bool > BranchRelaxAsmLarge("branch-relax-asm-large", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"))
bool isComplex(const MachineInstr &MI) const
unsigned getSpillAlignment(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class...
static cl::opt< bool > DisableNVSchedule("disable-hexagon-nv-schedule", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Disable schedule adjustment for new value stores."))
A description of a memory reference used in the backend.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
unsigned getInvertedPredicatedOpcode(const int Opc) const
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MCSuperRegIterator enumerates all super-registers of Reg.
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:451
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:293
LLVM_NODISCARD size_t count(char C) const
Return the number of occurrences of C in the string.
Definition: StringRef.h:476
unsigned getCompoundOpcode(const MachineInstr &GA, const MachineInstr &GB) const
bool isPredicatedNew(const MachineInstr &MI) const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
bool canExecuteInBundle(const MachineInstr &First, const MachineInstr &Second) const
Can these instructions execute at the same time in a bundle.
bool predOpcodeHasNot(ArrayRef< MachineOperand > Cond) const
const HexagonRegisterInfo * getRegisterInfo() const override
MachineBasicBlock * getBottomBlock()
Return the "bottom" block in the loop, which is the last block in the linear layout, ignoring any parts of the loop not contiguous with the part that contains the header.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
bool isVecUsableNextPacket(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
Name of external global symbol.
Reg
All possible values of the reg field in the ModR/M byte.
static StringRef getName(Value *V)
SimpleValueType SimpleTy
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
bool isDotNewInst(const MachineInstr &MI) const
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:290
bool isDeallocRet(const MachineInstr &MI) const
bool isExtended(const MachineInstr &MI) const
const char * getSymbolName() const
bool is_TC2(unsigned SchedClass)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:634
static MachineInstr * findLoopInstr(MachineBasicBlock *BB, unsigned EndLoopOp, MachineBasicBlock *TargetBB, SmallPtrSet< MachineBasicBlock *, 8 > &Visited)
Find the hardware loop instruction used to set-up the specified loop.
bool isSolo(const MachineInstr &MI) const
bool isLateInstrFeedsEarlyInstr(const MachineInstr &LRMI, const MachineInstr &ESMI) const
bool expandPostRAPseudo(MachineInstr &MI) const override
This function is called for all pseudo instructions that remain after register allocation.
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Cond) const override
Convert the instruction into a predicated instruction.
bool predCanBeUsedAsDotNew(const MachineInstr &MI, unsigned PredReg) const
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
SmallVector< MachineInstr *, 2 > getBranchingInstrs(MachineBasicBlock &MBB) const
bool doesNotReturn(const MachineInstr &CallMI) const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:287
bool isIndirectBranch(QueryType Type=AnyInBundle) const
Return true if this is an indirect branch, such as a branch through a register.
Definition: MachineInstr.h:488
bool isEndLoopN(unsigned Opcode) const
unsigned getBaseAndOffset(const MachineInstr &MI, int &Offset, unsigned &AccessSize) const
bool isCompoundBranchInstr(const MachineInstr &MI) const
void clearKillFlags(unsigned Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
short changeAddrMode_io_rr(short Opc) const
const RegList & Regs
bool isPredictedTaken(unsigned Opcode) const
int getMinValue(const MachineInstr &MI) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Itinerary data supplied by a subtarget to be used by a target.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they&#39;re not in a MachineFuncti...
bool isTC1(const MachineInstr &MI) const
Printable PrintReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubRegIdx=0)
Prints virtual and physical registers with or without a TRI instance.
unsigned getUndefRegState(bool B)
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
Compute the instruction latency of a given instruction.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
short changeAddrMode_rr_ur(short Opc) const
reverse_iterator rbegin()
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:482
bool getPredReg(ArrayRef< MachineOperand > Cond, unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
virtual const InstrItineraryData * getInstrItineraryData() const
getInstrItineraryData - Returns instruction itinerary data for the target or specific subtarget...
unsigned getCExtOpNum(const MachineInstr &MI) const
const int Hexagon_MEMD_OFFSET_MIN
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:454
bool is_TC3x(unsigned SchedClass)
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
static cl::opt< bool > EnableBranchPrediction("hexagon-enable-branch-prediction", cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"))
bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const override
Return true if it&#39;s profitable for if-converter to duplicate instructions of specified accumulated in...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:565
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:639
Address of a global value.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
const int Hexagon_MEMW_OFFSET_MAX
Constants for Hexagon instructions.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:421
Machine Value Type.
static cl::opt< bool > EnableALUForwarding("enable-alu-forwarding", cl::Hidden, cl::init(true), cl::desc("Enable vec alu forwarding"))
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
bool getIncrementValue(const MachineInstr &MI, int &Value) const override
If the instruction is an increment of a constant value, return the amount.
bool isCompare(QueryType Type=IgnoreBundle) const
Return true if this instruction is a comparison.
Definition: MachineInstr.h:519
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const override
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool validateBranchCond(const ArrayRef< MachineOperand > &Cond) const
MachineInstrBuilder & UseMI
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
Definition: MCAsmInfo.h:464
bool isPredicable(const MachineInstr &MI) const override
Return true if the specified instruction can be predicated.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:116
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE instructions.
bool isAccumulator(const MachineInstr &MI) const
bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const override
For instructions with a base and offset, return the position of the base register and offset operands...
bool isZeroExtendingLoad(const MachineInstr &MI) const
short changeAddrMode_io_abs(short Opc) const
bool hasUncondBranch(const MachineBasicBlock *B) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
short changeAddrMode_abs_io(short Opc) const
unsigned getAddrMode(const MachineInstr &MI) const
int getNonDotCurOp(const MachineInstr &MI) const
bool invertAndChangeJumpTarget(MachineInstr &MI, MachineBasicBlock *NewTarget) const
void setMBB(MachineBasicBlock *MBB)
bool isTC2Early(const MachineInstr &MI) const
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction(bundle).
Address of a basic block.
static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI)
bool isValidAutoIncImm(const EVT VT, const int Offset) const
StringRef getCommentString() const
Definition: MCAsmInfo.h:470
bool isFloat(const MachineInstr &MI) const
void setImm(int64_t immVal)
self_iterator getIterator()
Definition: ilist_node.h:82
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:301
iterator_range< pred_iterator > predecessors()
HexagonII::SubInstructionGroup getDuplexCandidateGroup(const MachineInstr &MI) const
void genAllInsnTimingClasses(MachineFunction &MF) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Extended Value Type.
Definition: ValueTypes.h:34
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
short getPseudoInstrPair(const MachineInstr &MI) const
MCSubRegIterator enumerates all sub-registers of Reg.
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
bool isEarlySourceInstr(const MachineInstr &MI) const
bool isPostIncrement(const MachineInstr &MI) const override
Return true for post-incremented instructions.
bool isExtendable(const MachineInstr &MI) const
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MO_LO16 - On a symbol operand, this represents a relocation containing lower 16 bit of the address...
Definition: ARMBaseInfo.h:223
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
static cl::opt< bool > EnableACCForwarding("enable-acc-forwarding", cl::Hidden, cl::init(true), cl::desc("Enable vec acc forwarding"))
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
void setIsKill(bool Val=true)
bool hasPseudoInstrPair(const MachineInstr &MI) const
The memory access writes data.
bool isIndirectCall(const MachineInstr &MI) const
bool isTC4x(const MachineInstr &MI) const
bool isDotCurInst(const MachineInstr &MI) const
static bool isIntRegForSubInst(unsigned Reg)
bool isConditionalBranch(QueryType Type=AnyInBundle) const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MachineInstr.h:496
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
bool isNewValueStore(const MachineInstr &MI) const
HexagonInstrInfo(HexagonSubtarget &ST)
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Decompose the machine operand&#39;s target flags into two values - the direct target flag value and any o...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:389
bool isNewValueInst(const MachineInstr &MI) const
bool isDebugValue() const
Definition: MachineInstr.h:816
MachineOperand class - Representation of each machine instruction operand.
bool isVecAcc(const MachineInstr &MI) const
MachineInstrBuilder MachineInstrBuilder & DefMI
bool hasEHLabel(const MachineBasicBlock *B) const
bool isJumpWithinBranchRange(const MachineInstr &MI, unsigned offset) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
unsigned getUnits(const MachineInstr &MI) const
short getRegForm(const MachineInstr &MI) const
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
static void getLiveRegsAt(LivePhysRegs &Regs, const MachineInstr &MI)
MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...
uint64_t getType(const MachineInstr &MI) const
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
Insert branch code into the end of the specified MachineBasicBlock.
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
Return true if it&#39;s profitable to predicate instructions with accumulated instruction latency of "Num...
bool isSignExtendingLoad(const MachineInstr &MI) const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
bool isOperandExtended(const MachineInstr &MI, unsigned OperandNum) const
unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const override
Measure the specified inline asm to determine an approximation of its length.
int64_t getImm() const
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
bool isDuplexPair(const MachineInstr &MIa, const MachineInstr &MIb) const
Symmetrical. See if these two instructions are fit for duplex pair.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:531
bool isLateSourceInstr(const MachineInstr &MI) const
bool isSpillPredRegOp(const MachineInstr &MI) const
unsigned getMaxInstLength() const
Definition: MCAsmInfo.h:461
static bool isDuplexPairMatch(unsigned Ga, unsigned Gb)
unsigned const TypeCVI_FIRST
unsigned createVR(MachineFunction *MF, MVT VT) const
HexagonInstrInfo specifics.
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Test if the given instruction should be considered a scheduling boundary.
unsigned reversePrediction(unsigned Opcode) const
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
bool isNewValue(const MachineInstr &MI) const
int findRegisterDefOperandIdx(unsigned Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found...
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isSaveCalleeSavedRegsCall(const MachineInstr &MI) const
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
Definition: MachineInstr.h:927
static LLVM_ATTRIBUTE_UNUSED unsigned getMemAccessSizeInBytes(MemAccessSize S)
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Remove the branching code at the end of the specific MBB.
bool mayStore() const
Return true if this instruction could possibly modify memory.
Definition: MCInstrDesc.h:393
unsigned const TypeCVI_LAST
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
unsigned succ_size() const
int getDotNewOp(const MachineInstr &MI) const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:139
int getCondOpcode(int Opc, bool sense) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
bool isPredicateLate(unsigned Opcode) const
Representation of each machine instruction.
Definition: MachineInstr.h:59
bool addLatencyToSchedule(const MachineInstr &MI1, const MachineInstr &MI2) const