LLVM 4.0.0 - HexagonInstrInfo.cpp
1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "Hexagon.h"
15 #include "HexagonHazardRecognizer.h"
16 #include "HexagonInstrInfo.h"
17 #include "HexagonRegisterInfo.h"
18 #include "HexagonSubtarget.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringRef.h"
36 #include "llvm/MC/MCAsmInfo.h"
37 #include "llvm/MC/MCInstrDesc.h"
39 #include "llvm/MC/MCRegisterInfo.h"
42 #include "llvm/Support/Debug.h"
48 #include <cassert>
49 #include <cctype>
50 #include <cstdint>
51 #include <cstring>
52 #include <iterator>
53 
54 using namespace llvm;
55 
56 #define DEBUG_TYPE "hexagon-instrinfo"
57 
58 #define GET_INSTRINFO_CTOR_DTOR
59 #define GET_INSTRMAP_INFO
60 #include "HexagonGenInstrInfo.inc"
61 #include "HexagonGenDFAPacketizer.inc"
62 
63 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
64  cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
65  "packetization boundary."));
66 
67 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
68  cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
69 
70 static cl::opt<bool> DisableNVSchedule("disable-hexagon-nv-schedule",
71  cl::Hidden, cl::ZeroOrMore, cl::init(false),
72  cl::desc("Disable schedule adjustment for new value stores."));
73 
74 static cl::opt<bool> EnableTimingClassLatency(
75  "enable-timing-class-latency", cl::Hidden, cl::init(false),
76  cl::desc("Enable timing class latency"));
77 
78 static cl::opt<bool> EnableALUForwarding(
79  "enable-alu-forwarding", cl::Hidden, cl::init(true),
80  cl::desc("Enable vec alu forwarding"));
81 
82 static cl::opt<bool> EnableACCForwarding(
83  "enable-acc-forwarding", cl::Hidden, cl::init(true),
84  cl::desc("Enable vec acc forwarding"));
85 
86 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
87  cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"));
88 
89 static cl::opt<bool> UseDFAHazardRec("dfa-hazard-rec",
90  cl::init(true), cl::Hidden, cl::ZeroOrMore,
91  cl::desc("Use the DFA based hazard recognizer."));
92 
93 ///
94 /// Constants for Hexagon instructions.
95 ///
96 const int Hexagon_MEMV_OFFSET_MAX_128B = 896; // #s4: -8*128...7*128
97 const int Hexagon_MEMV_OFFSET_MIN_128B = -1024; // #s4
98 const int Hexagon_MEMV_OFFSET_MAX = 448; // #s4: -8*64...7*64
99 const int Hexagon_MEMV_OFFSET_MIN = -512; // #s4
100 const int Hexagon_MEMW_OFFSET_MAX = 4095;
101 const int Hexagon_MEMW_OFFSET_MIN = -4096;
102 const int Hexagon_MEMD_OFFSET_MAX = 8191;
103 const int Hexagon_MEMD_OFFSET_MIN = -8192;
104 const int Hexagon_MEMH_OFFSET_MAX = 2047;
105 const int Hexagon_MEMH_OFFSET_MIN = -2048;
106 const int Hexagon_MEMB_OFFSET_MAX = 1023;
107 const int Hexagon_MEMB_OFFSET_MIN = -1024;
108 const int Hexagon_ADDI_OFFSET_MAX = 32767;
109 const int Hexagon_ADDI_OFFSET_MIN = -32768;
110 const int Hexagon_MEMD_AUTOINC_MAX = 56;
111 const int Hexagon_MEMD_AUTOINC_MIN = -64;
112 const int Hexagon_MEMW_AUTOINC_MAX = 28;
113 const int Hexagon_MEMW_AUTOINC_MIN = -32;
114 const int Hexagon_MEMH_AUTOINC_MAX = 14;
115 const int Hexagon_MEMH_AUTOINC_MIN = -16;
116 const int Hexagon_MEMB_AUTOINC_MAX = 7;
117 const int Hexagon_MEMB_AUTOINC_MIN = -8;
118 const int Hexagon_MEMV_AUTOINC_MAX = 192; // #s3
119 const int Hexagon_MEMV_AUTOINC_MIN = -256; // #s3
120 const int Hexagon_MEMV_AUTOINC_MAX_128B = 384; // #s3
121 const int Hexagon_MEMV_AUTOINC_MIN_128B = -512; // #s3
122 
123 // Pin the vtable to this file.
124 void HexagonInstrInfo::anchor() {}
125 
126 HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
127  : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
128  RI() {}
129 
130 static bool isIntRegForSubInst(unsigned Reg) {
131  return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
132  (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
133 }
134 
135 static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) {
136  return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
137  isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
138 }
139 
140 /// Calculate the number of instructions, excluding debug instructions.
141 static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
142  MachineBasicBlock::const_instr_iterator MIE) {
143  unsigned Count = 0;
144  for (; MIB != MIE; ++MIB) {
145  if (!MIB->isDebugValue())
146  ++Count;
147  }
148  return Count;
149 }
150 
151 /// Find the hardware loop instruction used to set-up the specified loop.
152 /// On Hexagon, we have two instructions used to set-up the hardware loop
153 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
154 /// to indicate the end of a loop.
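/// Schematically (illustrative Hexagon assembly; counts and label names are
/// assumed, not taken from this file):
///   loop0(.LBB0_1, #100)      // LOOP0 set-up, placed in a predecessor block
/// .LBB0_1:
///   ...                       // loop body
///   { ... }:endloop0          // ENDLOOP0 marks the end of the loop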
155 static MachineInstr *findLoopInstr(MachineBasicBlock *BB, int EndLoopOp,
156  SmallPtrSet<MachineBasicBlock *, 8> &Visited) {
157  int LOOPi;
158  int LOOPr;
159  if (EndLoopOp == Hexagon::ENDLOOP0) {
160  LOOPi = Hexagon::J2_loop0i;
161  LOOPr = Hexagon::J2_loop0r;
162  } else { // EndLoopOp == Hexagon::ENDLOOP1
163  LOOPi = Hexagon::J2_loop1i;
164  LOOPr = Hexagon::J2_loop1r;
165  }
166 
167  // The loop set-up instruction will be in a predecessor block.
168  for (MachineBasicBlock::pred_iterator PB = BB->pred_begin(),
169  PE = BB->pred_end(); PB != PE; ++PB) {
170  // If this block has already been visited, skip it.
171  if (!Visited.insert(*PB).second)
172  continue;
173  if (*PB == BB)
174  continue;
175  for (MachineBasicBlock::reverse_instr_iterator I = (*PB)->instr_rbegin(),
176  E = (*PB)->instr_rend(); I != E; ++I) {
177  int Opc = I->getOpcode();
178  if (Opc == LOOPi || Opc == LOOPr)
179  return &*I;
180  // We've reached a different loop, which means the loop0 has been removed.
181  if (Opc == EndLoopOp)
182  return nullptr;
183  }
184  // Check the predecessors for the LOOP instruction.
185  MachineInstr *loop = findLoopInstr(*PB, EndLoopOp, Visited);
186  if (loop)
187  return loop;
188  }
189  return nullptr;
190 }
191 
192 /// Gather register def/uses from MI.
193 /// This treats possible (predicated) defs as actually happening ones
194 /// (conservatively).
195 static inline void parseOperands(const MachineInstr &MI,
196  SmallVector<unsigned, 4> &Defs, SmallVector<unsigned, 8> &Uses) {
197  Defs.clear();
198  Uses.clear();
199 
200  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201  const MachineOperand &MO = MI.getOperand(i);
202 
203  if (!MO.isReg())
204  continue;
205 
206  unsigned Reg = MO.getReg();
207  if (!Reg)
208  continue;
209 
210  if (MO.isUse())
211  Uses.push_back(MO.getReg());
212 
213  if (MO.isDef())
214  Defs.push_back(MO.getReg());
215  }
216 }
217 
218 // Position dependent, so check twice for swap.
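// For example, isDuplexPairMatch(HSIG_L2, HSIG_L1) is true while
// isDuplexPairMatch(HSIG_L1, HSIG_L2) is false, so callers need to test both
// operand orders.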
219 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
220  switch (Ga) {
221  case HexagonII::HSIG_None:
222  default:
223  return false;
224  case HexagonII::HSIG_L1:
225  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
226  case HexagonII::HSIG_L2:
227  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
228  Gb == HexagonII::HSIG_A);
229  case HexagonII::HSIG_S1:
230  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
231  Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
232  case HexagonII::HSIG_S2:
233  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
234  Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
235  Gb == HexagonII::HSIG_A);
236  case HexagonII::HSIG_A:
237  return (Gb == HexagonII::HSIG_A);
238  case HexagonII::HSIG_Compound:
239  return (Gb == HexagonII::HSIG_Compound);
240  }
241  return false;
242 }
243 
244 /// isLoadFromStackSlot - If the specified machine instruction is a direct
245 /// load from a stack slot, return the virtual or physical register number of
246 /// the destination along with the FrameIndex of the loaded stack slot. If
247 /// not, return 0. This predicate must return 0 if the instruction has
248 /// any side effects other than loading from the stack slot.
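/// For example (schematic MIR; register and frame-index names are assumed):
///   %r0 = L2_loadri_io <fi#3>, 0
/// sets FrameIndex to 3 and returns %r0, while a non-zero offset makes the
/// function return 0.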
249 unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
250  int &FrameIndex) const {
251  switch (MI.getOpcode()) {
252  default:
253  break;
254  case Hexagon::L2_loadri_io:
255  case Hexagon::L2_loadrd_io:
256  case Hexagon::V6_vL32b_ai:
257  case Hexagon::V6_vL32b_ai_128B:
258  case Hexagon::V6_vL32Ub_ai:
259  case Hexagon::V6_vL32Ub_ai_128B:
260  case Hexagon::LDriw_pred:
261  case Hexagon::LDriw_mod:
262  case Hexagon::PS_vloadrq_ai:
263  case Hexagon::PS_vloadrw_ai:
264  case Hexagon::PS_vloadrq_ai_128B:
265  case Hexagon::PS_vloadrw_ai_128B: {
266  const MachineOperand OpFI = MI.getOperand(1);
267  if (!OpFI.isFI())
268  return 0;
269  const MachineOperand OpOff = MI.getOperand(2);
270  if (!OpOff.isImm() || OpOff.getImm() != 0)
271  return 0;
272  FrameIndex = OpFI.getIndex();
273  return MI.getOperand(0).getReg();
274  }
275 
276  case Hexagon::L2_ploadrit_io:
277  case Hexagon::L2_ploadrif_io:
278  case Hexagon::L2_ploadrdt_io:
279  case Hexagon::L2_ploadrdf_io: {
280  const MachineOperand OpFI = MI.getOperand(2);
281  if (!OpFI.isFI())
282  return 0;
283  const MachineOperand OpOff = MI.getOperand(3);
284  if (!OpOff.isImm() || OpOff.getImm() != 0)
285  return 0;
286  FrameIndex = OpFI.getIndex();
287  return MI.getOperand(0).getReg();
288  }
289  }
290 
291  return 0;
292 }
293 
294 /// isStoreToStackSlot - If the specified machine instruction is a direct
295 /// store to a stack slot, return the virtual or physical register number of
296 /// the source register along with the FrameIndex of the stack slot being stored to. If
297 /// not, return 0. This predicate must return 0 if the instruction has
298 /// any side effects other than storing to the stack slot.
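/// For example (schematic MIR; register and frame-index names are assumed):
///   S2_storeri_io <fi#3>, 0, %r1
/// sets FrameIndex to 3 and returns %r1, while a non-zero offset makes the
/// function return 0.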
299 unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
300  int &FrameIndex) const {
301  switch (MI.getOpcode()) {
302  default:
303  break;
304  case Hexagon::S2_storerb_io:
305  case Hexagon::S2_storerh_io:
306  case Hexagon::S2_storeri_io:
307  case Hexagon::S2_storerd_io:
308  case Hexagon::V6_vS32b_ai:
309  case Hexagon::V6_vS32b_ai_128B:
310  case Hexagon::V6_vS32Ub_ai:
311  case Hexagon::V6_vS32Ub_ai_128B:
312  case Hexagon::STriw_pred:
313  case Hexagon::STriw_mod:
314  case Hexagon::PS_vstorerq_ai:
315  case Hexagon::PS_vstorerw_ai:
316  case Hexagon::PS_vstorerq_ai_128B:
317  case Hexagon::PS_vstorerw_ai_128B: {
318  const MachineOperand &OpFI = MI.getOperand(0);
319  if (!OpFI.isFI())
320  return 0;
321  const MachineOperand &OpOff = MI.getOperand(1);
322  if (!OpOff.isImm() || OpOff.getImm() != 0)
323  return 0;
324  FrameIndex = OpFI.getIndex();
325  return MI.getOperand(2).getReg();
326  }
327 
328  case Hexagon::S2_pstorerbt_io:
329  case Hexagon::S2_pstorerbf_io:
330  case Hexagon::S2_pstorerht_io:
331  case Hexagon::S2_pstorerhf_io:
332  case Hexagon::S2_pstorerit_io:
333  case Hexagon::S2_pstorerif_io:
334  case Hexagon::S2_pstorerdt_io:
335  case Hexagon::S2_pstorerdf_io: {
336  const MachineOperand &OpFI = MI.getOperand(1);
337  if (!OpFI.isFI())
338  return 0;
339  const MachineOperand &OpOff = MI.getOperand(2);
340  if (!OpOff.isImm() || OpOff.getImm() != 0)
341  return 0;
342  FrameIndex = OpFI.getIndex();
343  return MI.getOperand(3).getReg();
344  }
345  }
346 
347  return 0;
348 }
349 
350 /// This function can analyze only one- and two-way branching and should
351 /// (mostly) be called from the target-independent side.
352 /// The first entry in Cond is always the opcode of the branching instruction,
353 /// except when the Cond vector is supposed to be empty, e.g. when analyzeBranch
354 /// fails or the block ends in a single unconditional jump. Subsequent entries
355 /// depend upon the opcode, e.g. "Jump_c p" will have
356 /// Cond[0] = Jump_c
357 /// Cond[1] = p
358 /// HW-loop ENDLOOP:
359 /// Cond[0] = ENDLOOP
360 /// Cond[1] = MBB
361 /// New value jump:
362 /// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
363 /// Cond[1] = R
364 /// Cond[2] = Imm
365 ///
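/// As a sketch (opcode and register names assumed): for a block ending in
///   if (p0) jump .LBB1
///   jump .LBB2
/// this returns TBB = .LBB1, FBB = .LBB2 and Cond = { J2_jumpt, p0 }.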
366 bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
367  MachineBasicBlock *&TBB,
368  MachineBasicBlock *&FBB,
369  SmallVectorImpl<MachineOperand> &Cond,
370  bool AllowModify) const {
371  TBB = nullptr;
372  FBB = nullptr;
373  Cond.clear();
374 
375  // If the block has no terminators, it just falls into the block after it.
376  MachineBasicBlock::instr_iterator I = MBB.instr_end();
377  if (I == MBB.instr_begin())
378  return false;
379 
380  // A basic block may look like this:
381  //
382  // [ insn
383  // EH_LABEL
384  // insn
385  // insn
386  // insn
387  // EH_LABEL
388  // insn ]
389  //
390  // It has two successors but does not have a terminator.
391  // We don't know how to handle such a block.
392  do {
393  --I;
394  if (I->isEHLabel())
395  // Don't analyze EH branches.
396  return true;
397  } while (I != MBB.instr_begin());
398 
399  I = MBB.instr_end();
400  --I;
401 
402  while (I->isDebugValue()) {
403  if (I == MBB.instr_begin())
404  return false;
405  --I;
406  }
407 
408  bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
409  I->getOperand(0).isMBB();
410  // Delete the J2_jump if it's equivalent to a fall-through.
411  if (AllowModify && JumpToBlock &&
412  MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
413  DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
414  I->eraseFromParent();
415  I = MBB.instr_end();
416  if (I == MBB.instr_begin())
417  return false;
418  --I;
419  }
420  if (!isUnpredicatedTerminator(*I))
421  return false;
422 
423  // Get the last instruction in the block.
424  MachineInstr *LastInst = &*I;
425  MachineInstr *SecondLastInst = nullptr;
426  // Find one more terminator if present.
427  while (true) {
428  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
429  if (!SecondLastInst)
430  SecondLastInst = &*I;
431  else
432  // This is a third branch.
433  return true;
434  }
435  if (I == MBB.instr_begin())
436  break;
437  --I;
438  }
439 
440  int LastOpcode = LastInst->getOpcode();
441  int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
442  // If the branch target is not a basic block, it could be a tail call.
443  // (It is, if the target is a function.)
444  if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
445  return true;
446  if (SecLastOpcode == Hexagon::J2_jump &&
447  !SecondLastInst->getOperand(0).isMBB())
448  return true;
449 
450  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
451  bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
452 
453  if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
454  return true;
455 
456  // If there is only one terminator instruction, process it.
457  if (LastInst && !SecondLastInst) {
458  if (LastOpcode == Hexagon::J2_jump) {
459  TBB = LastInst->getOperand(0).getMBB();
460  return false;
461  }
462  if (isEndLoopN(LastOpcode)) {
463  TBB = LastInst->getOperand(0).getMBB();
464  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
465  Cond.push_back(LastInst->getOperand(0));
466  return false;
467  }
468  if (LastOpcodeHasJMP_c) {
469  TBB = LastInst->getOperand(1).getMBB();
470  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
471  Cond.push_back(LastInst->getOperand(0));
472  return false;
473  }
474  // Only supporting rr/ri versions of new-value jumps.
475  if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
476  TBB = LastInst->getOperand(2).getMBB();
477  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
478  Cond.push_back(LastInst->getOperand(0));
479  Cond.push_back(LastInst->getOperand(1));
480  return false;
481  }
482  DEBUG(dbgs() << "\nCan't analyze BB#" << MBB.getNumber()
483  << " with one jump\n";);
484  // Otherwise, don't know what this is.
485  return true;
486  }
487 
488  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
489  bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
490  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
491  if (!SecondLastInst->getOperand(1).isMBB())
492  return true;
493  TBB = SecondLastInst->getOperand(1).getMBB();
494  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
495  Cond.push_back(SecondLastInst->getOperand(0));
496  FBB = LastInst->getOperand(0).getMBB();
497  return false;
498  }
499 
500  // Only supporting rr/ri versions of new-value jumps.
501  if (SecLastOpcodeHasNVJump &&
502  (SecondLastInst->getNumExplicitOperands() == 3) &&
503  (LastOpcode == Hexagon::J2_jump)) {
504  TBB = SecondLastInst->getOperand(2).getMBB();
505  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
506  Cond.push_back(SecondLastInst->getOperand(0));
507  Cond.push_back(SecondLastInst->getOperand(1));
508  FBB = LastInst->getOperand(0).getMBB();
509  return false;
510  }
511 
512  // If the block ends with two Hexagon:JMPs, handle it. The second one is not
513  // executed, so remove it.
514  if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
515  TBB = SecondLastInst->getOperand(0).getMBB();
516  I = LastInst->getIterator();
517  if (AllowModify)
518  I->eraseFromParent();
519  return false;
520  }
521 
522  // If the block ends with an ENDLOOP, and J2_jump, handle it.
523  if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
524  TBB = SecondLastInst->getOperand(0).getMBB();
525  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
526  Cond.push_back(SecondLastInst->getOperand(0));
527  FBB = LastInst->getOperand(0).getMBB();
528  return false;
529  }
530  DEBUG(dbgs() << "\nCan't analyze BB#" << MBB.getNumber()
531  << " with two jumps";);
532  // Otherwise, can't handle this.
533  return true;
534 }
535 
536 unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
537  int *BytesRemoved) const {
538  assert(!BytesRemoved && "code size not handled");
539 
540  DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber());
541  MachineBasicBlock::iterator I = MBB.end();
542  unsigned Count = 0;
543  while (I != MBB.begin()) {
544  --I;
545  if (I->isDebugValue())
546  continue;
547  // Only removing branches from end of MBB.
548  if (!I->isBranch())
549  return Count;
550  if (Count && (I->getOpcode() == Hexagon::J2_jump))
551  llvm_unreachable("Malformed basic block: unconditional branch not last");
552  MBB.erase(&MBB.back());
553  I = MBB.end();
554  ++Count;
555  }
556  return Count;
557 }
558 
559 unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
560  MachineBasicBlock *TBB,
561  MachineBasicBlock *FBB,
562  ArrayRef<MachineOperand> Cond,
563  const DebugLoc &DL,
564  int *BytesAdded) const {
565  unsigned BOpc = Hexagon::J2_jump;
566  unsigned BccOpc = Hexagon::J2_jumpt;
567  assert(validateBranchCond(Cond) && "Invalid branching condition");
568  assert(TBB && "insertBranch must not be told to insert a fallthrough");
569  assert(!BytesAdded && "code size not handled");
570 
571  // Check if reverseBranchCondition has asked to reverse this branch
572  // If we want to reverse the branch an odd number of times, we want
573  // J2_jumpf.
574  if (!Cond.empty() && Cond[0].isImm())
575  BccOpc = Cond[0].getImm();
576 
577  if (!FBB) {
578  if (Cond.empty()) {
579  // Due to a bug in TailMerging/CFG Optimization, we need to add a
580  // special case handling of a predicated jump followed by an
581  // unconditional jump. If not, Tail Merging and CFG Optimization go
582  // into an infinite loop.
583  MachineBasicBlock *NewTBB, *NewFBB;
584  SmallVector<MachineOperand, 4> Cond;
585  auto Term = MBB.getFirstTerminator();
586  if (Term != MBB.end() && isPredicated(*Term) &&
587  !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
588  MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
589  reverseBranchCondition(Cond);
590  removeBranch(MBB);
591  return insertBranch(MBB, TBB, nullptr, Cond, DL);
592  }
593  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
594  } else if (isEndLoopN(Cond[0].getImm())) {
595  int EndLoopOp = Cond[0].getImm();
596  assert(Cond[1].isMBB());
597  // Since we're adding an ENDLOOP, there better be a LOOP instruction.
598  // Check for it, and change the BB target if needed.
599  SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
600  MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
601  assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
602  Loop->getOperand(0).setMBB(TBB);
603  // Add the ENDLOOP after finding the LOOP0.
604  BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
605  } else if (isNewValueJump(Cond[0].getImm())) {
606  assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
607  // New value jump
608  // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
609  // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
610  unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
611  DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber(););
612  if (Cond[2].isReg()) {
613  unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
614  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
615  addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
616  } else if(Cond[2].isImm()) {
617  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
618  addImm(Cond[2].getImm()).addMBB(TBB);
619  } else
620  llvm_unreachable("Invalid condition for branching");
621  } else {
622  assert((Cond.size() == 2) && "Malformed cond vector");
623  const MachineOperand &RO = Cond[1];
624  unsigned Flags = getUndefRegState(RO.isUndef());
625  BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
626  }
627  return 1;
628  }
629  assert((!Cond.empty()) &&
630  "Cond. cannot be empty when multiple branchings are required");
631  assert((!isNewValueJump(Cond[0].getImm())) &&
632  "NV-jump cannot be inserted with another branch");
633  // Special case for hardware loops. The condition is a basic block.
634  if (isEndLoopN(Cond[0].getImm())) {
635  int EndLoopOp = Cond[0].getImm();
636  assert(Cond[1].isMBB());
637  // Since we're adding an ENDLOOP, there better be a LOOP instruction.
638  // Check for it, and change the BB target if needed.
639  SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
640  MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
641  assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
642  Loop->getOperand(0).setMBB(TBB);
643  // Add the ENDLOOP after finding the LOOP0.
644  BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
645  } else {
646  const MachineOperand &RO = Cond[1];
647  unsigned Flags = getUndefRegState(RO.isUndef());
648  BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
649  }
650  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
651 
652  return 2;
653 }
654 
655 /// Analyze the loop code to find the loop induction variable and the compare
656 /// used to compute the number of iterations. Currently, we only analyze loops
657 /// that are controlled using hardware loops. In this case, the induction
658 /// variable instruction is null. For all other cases, this function returns
659 /// true, which means we're unable to analyze the loop.
660 bool HexagonInstrInfo::analyzeLoop(MachineLoop &L,
661  MachineInstr *&IndVarInst,
662  MachineInstr *&CmpInst) const {
663 
664  MachineBasicBlock *LoopEnd = L.getBottomBlock();
665  MachineBasicBlock::iterator I = LoopEnd->getFirstTerminator();
666  // We really "analyze" only hardware loops right now.
667  if (I != LoopEnd->end() && isEndLoopN(I->getOpcode())) {
668  IndVarInst = nullptr;
669  CmpInst = &*I;
670  return false;
671  }
672  return true;
673 }
674 
675 /// Generate code to reduce the loop iteration by one and check if the loop is
676 /// finished. Return the value/register of the new loop count. This function
677 /// assumes that the nth iteration is peeled first.
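/// For a run-time trip count held in a register LC, the generated code is
/// roughly (virtual register names assumed):
///   vp = C2_cmpgtui LC, 1     // more iterations left after this one?
///   vn = A2_addi LC, -1       // decremented trip count
/// and Cond is filled with { J2_jumpf, vp } for the caller to branch on.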
678 unsigned HexagonInstrInfo::reduceLoopCount(MachineBasicBlock &MBB,
679  MachineInstr *IndVar, MachineInstr &Cmp,
680  SmallVectorImpl<MachineOperand> &Cond,
681  SmallVectorImpl<MachineInstr *> &PrevInsts,
682  unsigned Iter, unsigned MaxIter) const {
683  // We expect a hardware loop currently. This means that IndVar is set
684  // to null, and the compare is the ENDLOOP instruction.
685  assert((!IndVar) && isEndLoopN(Cmp.getOpcode())
686  && "Expecting a hardware loop");
687  MachineFunction *MF = MBB.getParent();
688  DebugLoc DL = Cmp.getDebugLoc();
689  SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
690  MachineInstr *Loop = findLoopInstr(&MBB, Cmp.getOpcode(), VisitedBBs);
691  if (!Loop)
692  return 0;
693  // If the loop trip count is a compile-time value, then just change the
694  // value.
695  if (Loop->getOpcode() == Hexagon::J2_loop0i ||
696  Loop->getOpcode() == Hexagon::J2_loop1i) {
697  int64_t Offset = Loop->getOperand(1).getImm();
698  if (Offset <= 1)
699  Loop->eraseFromParent();
700  else
701  Loop->getOperand(1).setImm(Offset - 1);
702  return Offset - 1;
703  }
704  // The loop trip count is a run-time value. We generate code to subtract
705  // one from the trip count, and update the loop instruction.
706  assert(Loop->getOpcode() == Hexagon::J2_loop0r && "Unexpected instruction");
707  unsigned LoopCount = Loop->getOperand(1).getReg();
708  // Check if we're done with the loop.
709  unsigned LoopEnd = createVR(MF, MVT::i1);
710  MachineInstr *NewCmp = BuildMI(&MBB, DL, get(Hexagon::C2_cmpgtui), LoopEnd).
711  addReg(LoopCount).addImm(1);
712  unsigned NewLoopCount = createVR(MF, MVT::i32);
713  MachineInstr *NewAdd = BuildMI(&MBB, DL, get(Hexagon::A2_addi), NewLoopCount).
714  addReg(LoopCount).addImm(-1);
715  // Update the previously generated instructions with the new loop counter.
716  for (SmallVectorImpl<MachineInstr *>::iterator I = PrevInsts.begin(),
717  E = PrevInsts.end(); I != E; ++I)
718  (*I)->substituteRegister(LoopCount, NewLoopCount, 0, getRegisterInfo());
719  PrevInsts.clear();
720  PrevInsts.push_back(NewCmp);
721  PrevInsts.push_back(NewAdd);
722  // Insert the new loop instruction if this is the last time the loop is
723  // decremented.
724  if (Iter == MaxIter)
725  BuildMI(&MBB, DL, get(Hexagon::J2_loop0r)).
726  addMBB(Loop->getOperand(0).getMBB()).addReg(NewLoopCount);
727  // Delete the old loop instruction.
728  if (Iter == 0)
729  Loop->eraseFromParent();
730  Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
731  Cond.push_back(NewCmp->getOperand(0));
732  return NewLoopCount;
733 }
734 
735 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
736  unsigned NumCycles, unsigned ExtraPredCycles,
737  BranchProbability Probability) const {
738  return nonDbgBBSize(&MBB) <= 3;
739 }
740 
741 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
742  unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
743  unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
744  const {
745  return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
746 }
747 
748 bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
749  unsigned NumInstrs, BranchProbability Probability) const {
750  return NumInstrs <= 4;
751 }
752 
753 void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
754  MachineBasicBlock::iterator I,
755  const DebugLoc &DL, unsigned DestReg,
756  unsigned SrcReg, bool KillSrc) const {
757  auto &HRI = getRegisterInfo();
758  unsigned KillFlag = getKillRegState(KillSrc);
759 
760  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
761  BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
762  .addReg(SrcReg, KillFlag);
763  return;
764  }
765  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
766  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
767  .addReg(SrcReg, KillFlag);
768  return;
769  }
770  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
771  // Map Pd = Ps to Pd = or(Ps, Ps).
772  BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
773  .addReg(SrcReg).addReg(SrcReg, KillFlag);
774  return;
775  }
776  if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
777  Hexagon::IntRegsRegClass.contains(SrcReg)) {
778  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
779  .addReg(SrcReg, KillFlag);
780  return;
781  }
782  if (Hexagon::IntRegsRegClass.contains(DestReg) &&
783  Hexagon::CtrRegsRegClass.contains(SrcReg)) {
784  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
785  .addReg(SrcReg, KillFlag);
786  return;
787  }
788  if (Hexagon::ModRegsRegClass.contains(DestReg) &&
789  Hexagon::IntRegsRegClass.contains(SrcReg)) {
790  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
791  .addReg(SrcReg, KillFlag);
792  return;
793  }
794  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
795  Hexagon::IntRegsRegClass.contains(DestReg)) {
796  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
797  .addReg(SrcReg, KillFlag);
798  return;
799  }
800  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
801  Hexagon::PredRegsRegClass.contains(DestReg)) {
802  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
803  .addReg(SrcReg, KillFlag);
804  return;
805  }
806  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
807  Hexagon::IntRegsRegClass.contains(DestReg)) {
808  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
809  .addReg(SrcReg, KillFlag);
810  return;
811  }
812  if (Hexagon::VectorRegsRegClass.contains(SrcReg, DestReg)) {
813  BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
814  addReg(SrcReg, KillFlag);
815  return;
816  }
817  if (Hexagon::VecDblRegsRegClass.contains(SrcReg, DestReg)) {
818  unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
819  unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
820  BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
821  .addReg(HiSrc, KillFlag)
822  .addReg(LoSrc, KillFlag);
823  return;
824  }
825  if (Hexagon::VecPredRegsRegClass.contains(SrcReg, DestReg)) {
826  BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
827  .addReg(SrcReg)
828  .addReg(SrcReg, KillFlag);
829  return;
830  }
831  if (Hexagon::VecPredRegsRegClass.contains(SrcReg) &&
832  Hexagon::VectorRegsRegClass.contains(DestReg)) {
833  llvm_unreachable("Unimplemented pred to vec");
834  return;
835  }
836  if (Hexagon::VecPredRegsRegClass.contains(DestReg) &&
837  Hexagon::VectorRegsRegClass.contains(SrcReg)) {
838  llvm_unreachable("Unimplemented vec to pred");
839  return;
840  }
841  if (Hexagon::VecPredRegs128BRegClass.contains(SrcReg, DestReg)) {
842  unsigned HiDst = HRI.getSubReg(DestReg, Hexagon::vsub_hi);
843  unsigned LoDst = HRI.getSubReg(DestReg, Hexagon::vsub_lo);
844  unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
845  unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
846  BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), HiDst)
847  .addReg(HiSrc, KillFlag);
848  BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), LoDst)
849  .addReg(LoSrc, KillFlag);
850  return;
851  }
852 
853 #ifndef NDEBUG
854  // Show the invalid registers to ease debugging.
855  dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber()
856  << ": " << PrintReg(DestReg, &HRI)
857  << " = " << PrintReg(SrcReg, &HRI) << '\n';
858 #endif
859  llvm_unreachable("Unimplemented");
860 }
861 
862 void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
863  MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI,
864  const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
865  DebugLoc DL = MBB.findDebugLoc(I);
866  MachineFunction &MF = *MBB.getParent();
867  MachineFrameInfo &MFI = MF.getFrameInfo();
868  unsigned Align = MFI.getObjectAlignment(FI);
869  unsigned KillFlag = getKillRegState(isKill);
870 
871  MachineMemOperand *MMO = MF.getMachineMemOperand(
872  MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
873  MFI.getObjectSize(FI), Align);
874 
875  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
876  BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
877  .addFrameIndex(FI).addImm(0)
878  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
879  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
880  BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
881  .addFrameIndex(FI).addImm(0)
882  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
883  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
884  BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
885  .addFrameIndex(FI).addImm(0)
886  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
887  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
888  BuildMI(MBB, I, DL, get(Hexagon::STriw_mod))
889  .addFrameIndex(FI).addImm(0)
890  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
891  } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
892  BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai_128B))
893  .addFrameIndex(FI).addImm(0)
894  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
895  } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
896  BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
897  .addFrameIndex(FI).addImm(0)
898  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
899  } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
900  unsigned Opc = Align < 128 ? Hexagon::V6_vS32Ub_ai_128B
901  : Hexagon::V6_vS32b_ai_128B;
902  BuildMI(MBB, I, DL, get(Opc))
903  .addFrameIndex(FI).addImm(0)
904  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
905  } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
906  unsigned Opc = Align < 64 ? Hexagon::V6_vS32Ub_ai
907  : Hexagon::V6_vS32b_ai;
908  BuildMI(MBB, I, DL, get(Opc))
909  .addFrameIndex(FI).addImm(0)
910  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
911  } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
912  unsigned Opc = Align < 64 ? Hexagon::PS_vstorerwu_ai
913  : Hexagon::PS_vstorerw_ai;
914  BuildMI(MBB, I, DL, get(Opc))
915  .addFrameIndex(FI).addImm(0)
916  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
917  } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
918  unsigned Opc = Align < 128 ? Hexagon::PS_vstorerwu_ai_128B
919  : Hexagon::PS_vstorerw_ai_128B;
920  BuildMI(MBB, I, DL, get(Opc))
921  .addFrameIndex(FI).addImm(0)
922  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
923  } else {
924  llvm_unreachable("Unimplemented");
925  }
926 }
927 
928 void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
929  MachineBasicBlock::iterator I, unsigned DestReg,
930  int FI, const TargetRegisterClass *RC,
931  const TargetRegisterInfo *TRI) const {
932  DebugLoc DL = MBB.findDebugLoc(I);
933  MachineFunction &MF = *MBB.getParent();
934  MachineFrameInfo &MFI = MF.getFrameInfo();
935  unsigned Align = MFI.getObjectAlignment(FI);
936 
937  MachineMemOperand *MMO = MF.getMachineMemOperand(
938  MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
939  MFI.getObjectSize(FI), Align);
940 
941  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
942  BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
943  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
944  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
945  BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
946  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
947  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
948  BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
949  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
950  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
951  BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg)
952  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
953  } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
954  BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai_128B), DestReg)
955  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
956  } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
957  BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
958  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
959  } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
960  unsigned Opc = Align < 128 ? Hexagon::PS_vloadrwu_ai_128B
961  : Hexagon::PS_vloadrw_ai_128B;
962  BuildMI(MBB, I, DL, get(Opc), DestReg)
963  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
964  } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
965  unsigned Opc = Align < 128 ? Hexagon::V6_vL32Ub_ai_128B
966  : Hexagon::V6_vL32b_ai_128B;
967  BuildMI(MBB, I, DL, get(Opc), DestReg)
968  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
969  } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
970  unsigned Opc = Align < 64 ? Hexagon::V6_vL32Ub_ai
971  : Hexagon::V6_vL32b_ai;
972  BuildMI(MBB, I, DL, get(Opc), DestReg)
973  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
974  } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
975  unsigned Opc = Align < 64 ? Hexagon::PS_vloadrwu_ai
976  : Hexagon::PS_vloadrw_ai;
977  BuildMI(MBB, I, DL, get(Opc), DestReg)
978  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
979  } else {
980  llvm_unreachable("Can't load this register from stack slot");
981  }
982 }
983 
984 static void getLiveRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
985  const MachineBasicBlock &B = *MI.getParent();
986  Regs.addLiveOuts(B);
987  auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
988  for (auto I = B.rbegin(); I != E; ++I)
989  Regs.stepBackward(*I);
990 }
991 
992 /// expandPostRAPseudo - This function is called for all pseudo instructions
993 /// that remain after register allocation. Many pseudo instructions are
994 /// created to help register allocation. This is the place to convert them
995 /// into real instructions. The target can edit MI in place, or it can insert
996 /// new instructions and erase MI. The function should return true if
997 /// anything was changed.
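/// For example, the PS_vmulw pseudo below expands into two M2_mpyi
/// instructions, one per 32-bit half of the 64-bit vector operands
/// (schematic; the register pairing is assumed):
///   d0 = PS_vmulw d1, d2  ==>  r1 = mpyi(r3, r5); r0 = mpyi(r2, r4)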
998 bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
999  const HexagonRegisterInfo &HRI = getRegisterInfo();
1000  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1001  MachineBasicBlock &MBB = *MI.getParent();
1002  DebugLoc DL = MI.getDebugLoc();
1003  unsigned Opc = MI.getOpcode();
1004  const unsigned VecOffset = 1;
1005 
1006  switch (Opc) {
1007  case TargetOpcode::COPY: {
1008  MachineOperand &MD = MI.getOperand(0);
1009  MachineOperand &MS = MI.getOperand(1);
1010  MachineBasicBlock::iterator MBBI = MI.getIterator();
1011  if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
1012  copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
1013  std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
1014  }
1015  MBB.erase(MBBI);
1016  return true;
1017  }
1018  case Hexagon::PS_aligna:
1019  BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
1020  .addReg(HRI.getFrameRegister())
1021  .addImm(-MI.getOperand(1).getImm());
1022  MBB.erase(MI);
1023  return true;
1024  case Hexagon::V6_vassignp_128B:
1025  case Hexagon::V6_vassignp: {
1026  unsigned SrcReg = MI.getOperand(1).getReg();
1027  unsigned DstReg = MI.getOperand(0).getReg();
1028  unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1029  BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1030  .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi), Kill)
1031  .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo), Kill);
1032  MBB.erase(MI);
1033  return true;
1034  }
1035  case Hexagon::V6_lo_128B:
1036  case Hexagon::V6_lo: {
1037  unsigned SrcReg = MI.getOperand(1).getReg();
1038  unsigned DstReg = MI.getOperand(0).getReg();
1039  unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1040  copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1041  MBB.erase(MI);
1042  MRI.clearKillFlags(SrcSubLo);
1043  return true;
1044  }
1045  case Hexagon::V6_hi_128B:
1046  case Hexagon::V6_hi: {
1047  unsigned SrcReg = MI.getOperand(1).getReg();
1048  unsigned DstReg = MI.getOperand(0).getReg();
1049  unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1050  copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1051  MBB.erase(MI);
1052  MRI.clearKillFlags(SrcSubHi);
1053  return true;
1054  }
1055  case Hexagon::PS_vstorerw_ai:
1056  case Hexagon::PS_vstorerwu_ai:
1057  case Hexagon::PS_vstorerw_ai_128B:
1058  case Hexagon::PS_vstorerwu_ai_128B: {
1059  bool Is128B = (Opc == Hexagon::PS_vstorerw_ai_128B ||
1060  Opc == Hexagon::PS_vstorerwu_ai_128B);
1061  bool Aligned = (Opc == Hexagon::PS_vstorerw_ai ||
1062  Opc == Hexagon::PS_vstorerw_ai_128B);
1063  unsigned SrcReg = MI.getOperand(2).getReg();
1064  unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1065  unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1066  unsigned NewOpc;
1067  if (Aligned)
1068  NewOpc = Is128B ? Hexagon::V6_vS32b_ai_128B
1069  : Hexagon::V6_vS32b_ai;
1070  else
1071  NewOpc = Is128B ? Hexagon::V6_vS32Ub_ai_128B
1072  : Hexagon::V6_vS32Ub_ai;
1073 
1074  unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
1075  MachineInstr *MI1New =
1076  BuildMI(MBB, MI, DL, get(NewOpc))
1077  .addOperand(MI.getOperand(0))
1078  .addImm(MI.getOperand(1).getImm())
1079  .addReg(SrcSubLo)
1080  .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1081  MI1New->getOperand(0).setIsKill(false);
1082  BuildMI(MBB, MI, DL, get(NewOpc))
1083  .addOperand(MI.getOperand(0))
1084  // The Vectors are indexed in multiples of vector size.
1085  .addImm(MI.getOperand(1).getImm() + Offset)
1086  .addReg(SrcSubHi)
1087  .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1088  MBB.erase(MI);
1089  return true;
1090  }
1091  case Hexagon::PS_vloadrw_ai:
1092  case Hexagon::PS_vloadrwu_ai:
1093  case Hexagon::PS_vloadrw_ai_128B:
1094  case Hexagon::PS_vloadrwu_ai_128B: {
1095  bool Is128B = (Opc == Hexagon::PS_vloadrw_ai_128B ||
1096  Opc == Hexagon::PS_vloadrwu_ai_128B);
1097  bool Aligned = (Opc == Hexagon::PS_vloadrw_ai ||
1098  Opc == Hexagon::PS_vloadrw_ai_128B);
1099  unsigned NewOpc;
1100  if (Aligned)
1101  NewOpc = Is128B ? Hexagon::V6_vL32b_ai_128B
1102  : Hexagon::V6_vL32b_ai;
1103  else
1104  NewOpc = Is128B ? Hexagon::V6_vL32Ub_ai_128B
1105  : Hexagon::V6_vL32Ub_ai;
1106 
1107  unsigned DstReg = MI.getOperand(0).getReg();
1108  unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
1109  MachineInstr *MI1New =
1110  BuildMI(MBB, MI, DL, get(NewOpc),
1111  HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1112  .addOperand(MI.getOperand(1))
1113  .addImm(MI.getOperand(2).getImm());
1114  MI1New->getOperand(1).setIsKill(false);
1115  BuildMI(MBB, MI, DL, get(NewOpc),
1116  HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1117  .addOperand(MI.getOperand(1))
1118  // The Vectors are indexed in multiples of vector size.
1119  .addImm(MI.getOperand(2).getImm() + Offset)
1120  .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1121  MBB.erase(MI);
1122  return true;
1123  }
1124  case Hexagon::PS_true: {
1125  unsigned Reg = MI.getOperand(0).getReg();
1126  BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1127  .addReg(Reg, RegState::Undef)
1128  .addReg(Reg, RegState::Undef);
1129  MBB.erase(MI);
1130  return true;
1131  }
1132  case Hexagon::PS_false: {
1133  unsigned Reg = MI.getOperand(0).getReg();
1134  BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1135  .addReg(Reg, RegState::Undef)
1136  .addReg(Reg, RegState::Undef);
1137  MBB.erase(MI);
1138  return true;
1139  }
1140  case Hexagon::PS_vmulw: {
1141  // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1142  unsigned DstReg = MI.getOperand(0).getReg();
1143  unsigned Src1Reg = MI.getOperand(1).getReg();
1144  unsigned Src2Reg = MI.getOperand(2).getReg();
1145  unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1146  unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1147  unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1148  unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1149  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1150  HRI.getSubReg(DstReg, Hexagon::isub_hi))
1151  .addReg(Src1SubHi)
1152  .addReg(Src2SubHi);
1153  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1154  HRI.getSubReg(DstReg, Hexagon::isub_lo))
1155  .addReg(Src1SubLo)
1156  .addReg(Src2SubLo);
1157  MBB.erase(MI);
1158  MRI.clearKillFlags(Src1SubHi);
1159  MRI.clearKillFlags(Src1SubLo);
1160  MRI.clearKillFlags(Src2SubHi);
1161  MRI.clearKillFlags(Src2SubLo);
1162  return true;
1163  }
1164  case Hexagon::PS_vmulw_acc: {
1165  // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1166  unsigned DstReg = MI.getOperand(0).getReg();
1167  unsigned Src1Reg = MI.getOperand(1).getReg();
1168  unsigned Src2Reg = MI.getOperand(2).getReg();
1169  unsigned Src3Reg = MI.getOperand(3).getReg();
1170  unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1171  unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1172  unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1173  unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1174  unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1175  unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1176  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1177  HRI.getSubReg(DstReg, Hexagon::isub_hi))
1178  .addReg(Src1SubHi)
1179  .addReg(Src2SubHi)
1180  .addReg(Src3SubHi);
1181  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1182  HRI.getSubReg(DstReg, Hexagon::isub_lo))
1183  .addReg(Src1SubLo)
1184  .addReg(Src2SubLo)
1185  .addReg(Src3SubLo);
1186  MBB.erase(MI);
1187  MRI.clearKillFlags(Src1SubHi);
1188  MRI.clearKillFlags(Src1SubLo);
1189  MRI.clearKillFlags(Src2SubHi);
1190  MRI.clearKillFlags(Src2SubLo);
1191  MRI.clearKillFlags(Src3SubHi);
1192  MRI.clearKillFlags(Src3SubLo);
1193  return true;
1194  }
1195  case Hexagon::PS_pselect: {
1196  const MachineOperand &Op0 = MI.getOperand(0);
1197  const MachineOperand &Op1 = MI.getOperand(1);
1198  const MachineOperand &Op2 = MI.getOperand(2);
1199  const MachineOperand &Op3 = MI.getOperand(3);
1200  unsigned Rd = Op0.getReg();
1201  unsigned Pu = Op1.getReg();
1202  unsigned Rs = Op2.getReg();
1203  unsigned Rt = Op3.getReg();
1204  DebugLoc DL = MI.getDebugLoc();
1205  unsigned K1 = getKillRegState(Op1.isKill());
1206  unsigned K2 = getKillRegState(Op2.isKill());
1207  unsigned K3 = getKillRegState(Op3.isKill());
1208  if (Rd != Rs)
1209  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1210  .addReg(Pu, (Rd == Rt) ? K1 : 0)
1211  .addReg(Rs, K2);
1212  if (Rd != Rt)
1213  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1214  .addReg(Pu, K1)
1215  .addReg(Rt, K3);
1216  MBB.erase(MI);
1217  return true;
1218  }
1219  case Hexagon::PS_vselect:
1220  case Hexagon::PS_vselect_128B: {
1221  const MachineOperand &Op0 = MI.getOperand(0);
1222  const MachineOperand &Op1 = MI.getOperand(1);
1223  const MachineOperand &Op2 = MI.getOperand(2);
1224  const MachineOperand &Op3 = MI.getOperand(3);
1225  LivePhysRegs LiveAtMI(&HRI);
1226  getLiveRegsAt(LiveAtMI, MI);
1227  bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg());
1228  if (Op0.getReg() != Op2.getReg()) {
1229  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1230  .addOperand(Op0)
1231  .addOperand(Op1)
1232  .addOperand(Op2);
1233  if (IsDestLive)
1234  T.addReg(Op0.getReg(), RegState::Implicit);
1235  IsDestLive = true;
1236  }
1237  if (Op0.getReg() != Op3.getReg()) {
1238  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1239  .addOperand(Op0)
1240  .addOperand(Op1)
1241  .addOperand(Op3);
1242  if (IsDestLive)
1243  T.addReg(Op0.getReg(), RegState::Implicit);
1244  }
1245  MBB.erase(MI);
1246  return true;
1247  }
1248  case Hexagon::PS_wselect:
1249  case Hexagon::PS_wselect_128B: {
1250  MachineOperand &Op0 = MI.getOperand(0);
1251  MachineOperand &Op1 = MI.getOperand(1);
1252  MachineOperand &Op2 = MI.getOperand(2);
1253  MachineOperand &Op3 = MI.getOperand(3);
1254  LivePhysRegs LiveAtMI(&HRI);
1255  getLiveRegsAt(LiveAtMI, MI);
1256  bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg());
1257 
1258  if (Op0.getReg() != Op2.getReg()) {
1259  unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1260  unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1261  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1262  .addOperand(Op0)
1263  .addOperand(Op1)
1264  .addReg(SrcHi)
1265  .addReg(SrcLo);
1266  if (IsDestLive)
1267  T.addReg(Op0.getReg(), RegState::Implicit);
1268  IsDestLive = true;
1269  }
1270  if (Op0.getReg() != Op3.getReg()) {
1271  unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1272  unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1273  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1274  .addOperand(Op0)
1275  .addOperand(Op1)
1276  .addReg(SrcHi)
1277  .addReg(SrcLo);
1278  if (IsDestLive)
1279  T.addReg(Op0.getReg(), RegState::Implicit);
1280  }
1281  MBB.erase(MI);
1282  return true;
1283  }
1284  case Hexagon::PS_tailcall_i:
1285  MI.setDesc(get(Hexagon::J2_jump));
1286  return true;
1287  case Hexagon::PS_tailcall_r:
1288  case Hexagon::PS_jmpret:
1289  MI.setDesc(get(Hexagon::J2_jumpr));
1290  return true;
1291  case Hexagon::PS_jmprett:
1292  MI.setDesc(get(Hexagon::J2_jumprt));
1293  return true;
1294  case Hexagon::PS_jmpretf:
1295  MI.setDesc(get(Hexagon::J2_jumprf));
1296  return true;
1297  case Hexagon::PS_jmprettnewpt:
1298  MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1299  return true;
1300  case Hexagon::PS_jmpretfnewpt:
1301  MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1302  return true;
1303  case Hexagon::PS_jmprettnew:
1304  MI.setDesc(get(Hexagon::J2_jumprtnew));
1305  return true;
1306  case Hexagon::PS_jmpretfnew:
1307  MI.setDesc(get(Hexagon::J2_jumprfnew));
1308  return true;
1309  }
1310 
1311  return false;
1312 }
1313 
1314 // We indicate that we want to reverse the branch by
1315 // inserting the reversed branching opcode.
1316 bool HexagonInstrInfo::reverseBranchCondition(
1317  SmallVectorImpl<MachineOperand> &Cond) const {
1318  if (Cond.empty())
1319  return true;
1320  assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1321  unsigned opcode = Cond[0].getImm();
1322  //unsigned temp;
1323  assert(get(opcode).isBranch() && "Should be a branching condition.");
1324  if (isEndLoopN(opcode))
1325  return true;
1326  unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1327  Cond[0].setImm(NewOpcode);
1328  return false;
1329 }
1330 
1331 void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1332  MachineBasicBlock::iterator MI) const {
1333  DebugLoc DL;
1334  BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1335 }
1336 
1337 bool HexagonInstrInfo::isPostIncrement(const MachineInstr &MI) const {
1338  return getAddrMode(MI) == HexagonII::PostInc;
1339 }
1340 
1341 // Returns true if an instruction is predicated irrespective of the predicate
1342 // sense. For example, all of the following will return true.
1343 // if (p0) R1 = add(R2, R3)
1344 // if (!p0) R1 = add(R2, R3)
1345 // if (p0.new) R1 = add(R2, R3)
1346 // if (!p0.new) R1 = add(R2, R3)
1347 // Note: New-value stores are not included here as in the current
1348 // implementation, we don't need to check their predicate sense.
1349 bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
1350  const uint64_t F = MI.getDesc().TSFlags;
1351  return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
1352 }
1353 
1354 bool HexagonInstrInfo::PredicateInstruction(
1355  MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1356  if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1357  isEndLoopN(Cond[0].getImm())) {
1358  DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1359  return false;
1360  }
1361  int Opc = MI.getOpcode();
1362  assert (isPredicable(MI) && "Expected predicable instruction");
1363  bool invertJump = predOpcodeHasNot(Cond);
1364 
1365  // We have to predicate MI "in place", i.e. after this function returns,
1366  // MI will need to be transformed into a predicated form. To avoid com-
1367  // plicated manipulations with the operands (handling tied operands,
1368  // etc.), build a new temporary instruction, then overwrite MI with it.
1369 
1370  MachineBasicBlock &B = *MI.getParent();
1371  DebugLoc DL = MI.getDebugLoc();
1372  unsigned PredOpc = getCondOpcode(Opc, invertJump);
1373  MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1374  unsigned NOp = 0, NumOps = MI.getNumOperands();
1375  while (NOp < NumOps) {
1376  MachineOperand &Op = MI.getOperand(NOp);
1377  if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1378  break;
1379  T.addOperand(Op);
1380  NOp++;
1381  }
1382 
1383  unsigned PredReg, PredRegPos, PredRegFlags;
1384  bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1385  (void)GotPredReg;
1386  assert(GotPredReg);
1387  T.addReg(PredReg, PredRegFlags);
1388  while (NOp < NumOps)
1389  T.addOperand(MI.getOperand(NOp++));
1390 
1391  MI.setDesc(get(PredOpc));
1392  while (unsigned n = MI.getNumOperands())
1393  MI.RemoveOperand(n-1);
1394  for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1395  MI.addOperand(T->getOperand(i));
1396 
1397  MachineBasicBlock::instr_iterator TI = T->getIterator();
1398  B.erase(TI);
1399 
1400  MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1401  MRI.clearKillFlags(PredReg);
1402  return true;
1403 }
1404 
1405 bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1406  ArrayRef<MachineOperand> Pred2) const {
1407  // TODO: Fix this
1408  return false;
1409 }
1410 
1411 bool HexagonInstrInfo::DefinesPredicate(
1412  MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
1413  auto &HRI = getRegisterInfo();
1414  for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) {
1415  MachineOperand MO = MI.getOperand(oper);
1416  if (MO.isReg() && MO.isDef()) {
1417  const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1418  if (RC == &Hexagon::PredRegsRegClass) {
1419  Pred.push_back(MO);
1420  return true;
1421  }
1422  }
1423  }
1424  return false;
1425 }
1426 
1427 bool HexagonInstrInfo::isPredicable(MachineInstr &MI) const {
1428  return MI.getDesc().isPredicable();
1429 }
1430 
1431 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1432  const MachineBasicBlock *MBB,
1433  const MachineFunction &MF) const {
1434  // Debug info is never a scheduling boundary. It's necessary to be explicit
1435  // due to the special treatment of IT instructions below, otherwise a
1436  // dbg_value followed by an IT will result in the IT instruction being
1437  // considered a scheduling hazard, which is wrong. It should be the actual
1438  // instruction preceding the dbg_value instruction(s), just like it is
1439  // when debug info is not present.
1440  if (MI.isDebugValue())
1441  return false;
1442 
1443  // Throwing call is a boundary.
1444  if (MI.isCall()) {
1445  // Don't mess around with no return calls.
1446  if (doesNotReturn(MI))
1447  return true;
1448  // If any of the block's successors is a landing pad, this could be a
1449  // throwing call.
1450  for (auto I : MBB->successors())
1451  if (I->isEHPad())
1452  return true;
1453  }
1454 
1455  // Terminators and labels can't be scheduled around.
1456  if (MI.getDesc().isTerminator() || MI.isPosition())
1457  return true;
1458 
1459  if (MI.isInlineAsm() && !ScheduleInlineAsm)
1460  return true;
1461 
1462  return false;
1463 }
1464 
1465 /// Measure the specified inline asm to determine an approximation of its
1466 /// length.
1467 /// Comments (which run till the next SeparatorString or newline) do not
1468 /// count as an instruction.
1469 /// Any other non-whitespace text is considered an instruction, with
1470 /// multiple instructions separated by SeparatorString or newlines.
1471 /// Variable-length instructions are not handled here; this function
1472 /// may be overloaded in the target code to do that.
1473 /// Hexagon counts the number of ##'s and adjusts for that many
1474 /// constant extenders.
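/// For example, an inline-asm string such as
///   "r1 = memw(r0)\n r2 = add(r1, ##12345)"
/// (illustrative) counts as two instructions plus one constant extender,
/// i.e. 2 * MaxInstLength + 4 bytes.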
1475 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1476  const MCAsmInfo &MAI) const {
1477  StringRef AStr(Str);
1478  // Count the number of instructions in the asm.
1479  bool atInsnStart = true;
1480  unsigned Length = 0;
1481  for (; *Str; ++Str) {
1482  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1483  strlen(MAI.getSeparatorString())) == 0)
1484  atInsnStart = true;
1485  if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
1486  Length += MAI.getMaxInstLength();
1487  atInsnStart = false;
1488  }
1489  if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1490  MAI.getCommentString().size()) == 0)
1491  atInsnStart = false;
1492  }
1493 
1494  // Add to size number of constant extenders seen * 4.
1495  StringRef Occ("##");
1496  Length += AStr.count(Occ)*4;
1497  return Length;
1498 }
1499 
1500 ScheduleHazardRecognizer *
1501 HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1502  const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1503  if (UseDFAHazardRec) {
1504  auto &HST = DAG->MF.getSubtarget<HexagonSubtarget>();
1505  return new HexagonHazardRecognizer(II, this, HST);
1506  }
1507  return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1508 }
1509 
1510 /// \brief For a comparison instruction, return the source registers in
1511 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
1512 /// compares against in CmpValue. Return true if the comparison instruction
1513 /// can be analyzed.
1514 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1515  unsigned &SrcReg2, int &Mask,
1516  int &Value) const {
1517  unsigned Opc = MI.getOpcode();
1518 
1519  // Set mask and the first source register.
1520  switch (Opc) {
1521  case Hexagon::C2_cmpeq:
1522  case Hexagon::C2_cmpeqp:
1523  case Hexagon::C2_cmpgt:
1524  case Hexagon::C2_cmpgtp:
1525  case Hexagon::C2_cmpgtu:
1526  case Hexagon::C2_cmpgtup:
1527  case Hexagon::C4_cmpneq:
1528  case Hexagon::C4_cmplte:
1529  case Hexagon::C4_cmplteu:
1530  case Hexagon::C2_cmpeqi:
1531  case Hexagon::C2_cmpgti:
1532  case Hexagon::C2_cmpgtui:
1533  case Hexagon::C4_cmpneqi:
1534  case Hexagon::C4_cmplteui:
1535  case Hexagon::C4_cmpltei:
1536  SrcReg = MI.getOperand(1).getReg();
1537  Mask = ~0;
1538  break;
1539  case Hexagon::A4_cmpbeq:
1540  case Hexagon::A4_cmpbgt:
1541  case Hexagon::A4_cmpbgtu:
1542  case Hexagon::A4_cmpbeqi:
1543  case Hexagon::A4_cmpbgti:
1544  case Hexagon::A4_cmpbgtui:
1545  SrcReg = MI.getOperand(1).getReg();
1546  Mask = 0xFF;
1547  break;
1548  case Hexagon::A4_cmpheq:
1549  case Hexagon::A4_cmphgt:
1550  case Hexagon::A4_cmphgtu:
1551  case Hexagon::A4_cmpheqi:
1552  case Hexagon::A4_cmphgti:
1553  case Hexagon::A4_cmphgtui:
1554  SrcReg = MI.getOperand(1).getReg();
1555  Mask = 0xFFFF;
1556  break;
1557  }
1558 
1559  // Set the value/second source register.
1560  switch (Opc) {
1561  case Hexagon::C2_cmpeq:
1562  case Hexagon::C2_cmpeqp:
1563  case Hexagon::C2_cmpgt:
1564  case Hexagon::C2_cmpgtp:
1565  case Hexagon::C2_cmpgtu:
1566  case Hexagon::C2_cmpgtup:
1567  case Hexagon::A4_cmpbeq:
1568  case Hexagon::A4_cmpbgt:
1569  case Hexagon::A4_cmpbgtu:
1570  case Hexagon::A4_cmpheq:
1571  case Hexagon::A4_cmphgt:
1572  case Hexagon::A4_cmphgtu:
1573  case Hexagon::C4_cmpneq:
1574  case Hexagon::C4_cmplte:
1575  case Hexagon::C4_cmplteu:
1576  SrcReg2 = MI.getOperand(2).getReg();
1577  return true;
1578 
1579  case Hexagon::C2_cmpeqi:
1580  case Hexagon::C2_cmpgtui:
1581  case Hexagon::C2_cmpgti:
1582  case Hexagon::C4_cmpneqi:
1583  case Hexagon::C4_cmplteui:
1584  case Hexagon::C4_cmpltei:
1585  case Hexagon::A4_cmpbeqi:
1586  case Hexagon::A4_cmpbgti:
1587  case Hexagon::A4_cmpbgtui:
1588  case Hexagon::A4_cmpheqi:
1589  case Hexagon::A4_cmphgti:
1590  case Hexagon::A4_cmphgtui:
1591  SrcReg2 = 0;
1592  Value = MI.getOperand(2).getImm();
1593  return true;
1594  }
1595 
1596  return false;
1597 }
1598 
1599 unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1600  const MachineInstr &MI,
1601  unsigned *PredCost) const {
1602  return getInstrTimingClassLatency(ItinData, MI);
1603 }
1604 
1605 DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1606  const TargetSubtargetInfo &STI) const {
1607  const InstrItineraryData *II = STI.getInstrItineraryData();
1608  return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1609 }
1610 
1611 // Inspired by this pair:
1612 // %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
1613 // S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1]
1614 // Currently AA considers the addresses in these instructions to be aliasing.
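// In the pair above both accesses share the base %R29; with offsets 132 and
// 136 and an access size of 4, the difference 136 - 132 >= 4, so the two
// accesses are reported as trivially disjoint.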
1615 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
1616  MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
1617  int OffsetA = 0, OffsetB = 0;
1618  unsigned SizeA = 0, SizeB = 0;
1619 
1620  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1621  MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1622  return false;
1623 
1624  // Two instructions that are pure loads (as opposed to memops, which both
1625  // load and store) are never dependent on each other.
1626  if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1627  return true;
1628 
1629  // Get base, offset, and access size in MIa.
1630  unsigned BaseRegA = getBaseAndOffset(MIa, OffsetA, SizeA);
1631  if (!BaseRegA || !SizeA)
1632  return false;
1633 
1634  // Get base, offset, and access size in MIb.
1635  unsigned BaseRegB = getBaseAndOffset(MIb, OffsetB, SizeB);
1636  if (!BaseRegB || !SizeB)
1637  return false;
1638 
1639  if (BaseRegA != BaseRegB)
1640  return false;
1641 
1642  // Both are memory accesses off the same base register with known offsets.
1643  // Check whether the accessed byte ranges can overlap.
1644  if (OffsetA > OffsetB) {
1645  uint64_t offDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
1646  return (SizeB <= offDiff);
1647  } else if (OffsetA < OffsetB) {
1648  uint64_t offDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
1649  return (SizeA <= offDiff);
1650  }
1651 
1652  return false;
1653 }
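// Illustrative trace of the range check above, using the motivating pair from
// the comment before this function: the load reads 4 bytes at R29+136 and the
// store writes 4 bytes at R29+132.  With OffsetA = 136 and OffsetB = 132,
// offDiff = 4 and SizeB (4) <= offDiff, so the accesses are reported as
// trivially disjoint and the function returns true.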
1654 
1655 /// If the instruction is an increment of a constant value, return the amount.
1657  int &Value) const {
1658  if (isPostIncrement(MI)) {
1659  unsigned AccessSize;
1660  return getBaseAndOffset(MI, Value, AccessSize);
1661  }
1662  if (MI.getOpcode() == Hexagon::A2_addi) {
1663  Value = MI.getOperand(2).getImm();
1664  return true;
1665  }
1666 
1667  return false;
1668 }
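// For illustration: for "Rd = add(Rs,#8)" (A2_addi), the code above sets
// Value to 8 and returns true; for an instruction that is neither a
// post-increment access nor an A2_addi, it returns false.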
1669 
1672  const TargetRegisterClass *TRC;
1673  if (VT == MVT::i1) {
1674  TRC = &Hexagon::PredRegsRegClass;
1675  } else if (VT == MVT::i32 || VT == MVT::f32) {
1676  TRC = &Hexagon::IntRegsRegClass;
1677  } else if (VT == MVT::i64 || VT == MVT::f64) {
1678  TRC = &Hexagon::DoubleRegsRegClass;
1679  } else {
1680  llvm_unreachable("Cannot handle this register class");
1681  }
1682 
1683  unsigned NewReg = MRI.createVirtualRegister(TRC);
1684  return NewReg;
1685 }
1686 
1688  return (getAddrMode(MI) == HexagonII::AbsoluteSet);
1689 }
1690 
1692  const uint64_t F = MI.getDesc().TSFlags;
1694 }
1695 
1697  const MachineFunction *MF = MI.getParent()->getParent();
1698  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1699  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
1700 
1701  if (!(isTC1(MI))
1702  && !(QII->isTC2Early(MI))
1703  && !(MI.getDesc().mayLoad())
1704  && !(MI.getDesc().mayStore())
1705  && (MI.getDesc().getOpcode() != Hexagon::S2_allocframe)
1706  && (MI.getDesc().getOpcode() != Hexagon::L2_deallocframe)
1707  && !(QII->isMemOp(MI))
1708  && !(MI.isBranch())
1709  && !(MI.isReturn())
1710  && !MI.isCall())
1711  return true;
1712 
1713  return false;
1714 }
1715 
1716 // Return true if the instruction is a compound branch instruction.
1718  return (getType(MI) == HexagonII::TypeCOMPOUND && MI.isBranch());
1719 }
1720 
1722  return (MI.isBranch() && isPredicated(MI)) ||
1723  isConditionalTransfer(MI) ||
1724  isConditionalALU32(MI) ||
1725  isConditionalLoad(MI) ||
1726  // Predicated stores which don't have a .new on any operands.
1727  (MI.mayStore() && isPredicated(MI) && !isNewValueStore(MI) &&
1728  !isPredicatedNew(MI));
1729 }
1730 
1732  switch (MI.getOpcode()) {
1733  case Hexagon::A2_paddf:
1734  case Hexagon::A2_paddfnew:
1735  case Hexagon::A2_paddif:
1736  case Hexagon::A2_paddifnew:
1737  case Hexagon::A2_paddit:
1738  case Hexagon::A2_padditnew:
1739  case Hexagon::A2_paddt:
1740  case Hexagon::A2_paddtnew:
1741  case Hexagon::A2_pandf:
1742  case Hexagon::A2_pandfnew:
1743  case Hexagon::A2_pandt:
1744  case Hexagon::A2_pandtnew:
1745  case Hexagon::A2_porf:
1746  case Hexagon::A2_porfnew:
1747  case Hexagon::A2_port:
1748  case Hexagon::A2_portnew:
1749  case Hexagon::A2_psubf:
1750  case Hexagon::A2_psubfnew:
1751  case Hexagon::A2_psubt:
1752  case Hexagon::A2_psubtnew:
1753  case Hexagon::A2_pxorf:
1754  case Hexagon::A2_pxorfnew:
1755  case Hexagon::A2_pxort:
1756  case Hexagon::A2_pxortnew:
1757  case Hexagon::A4_paslhf:
1758  case Hexagon::A4_paslhfnew:
1759  case Hexagon::A4_paslht:
1760  case Hexagon::A4_paslhtnew:
1761  case Hexagon::A4_pasrhf:
1762  case Hexagon::A4_pasrhfnew:
1763  case Hexagon::A4_pasrht:
1764  case Hexagon::A4_pasrhtnew:
1765  case Hexagon::A4_psxtbf:
1766  case Hexagon::A4_psxtbfnew:
1767  case Hexagon::A4_psxtbt:
1768  case Hexagon::A4_psxtbtnew:
1769  case Hexagon::A4_psxthf:
1770  case Hexagon::A4_psxthfnew:
1771  case Hexagon::A4_psxtht:
1772  case Hexagon::A4_psxthtnew:
1773  case Hexagon::A4_pzxtbf:
1774  case Hexagon::A4_pzxtbfnew:
1775  case Hexagon::A4_pzxtbt:
1776  case Hexagon::A4_pzxtbtnew:
1777  case Hexagon::A4_pzxthf:
1778  case Hexagon::A4_pzxthfnew:
1779  case Hexagon::A4_pzxtht:
1780  case Hexagon::A4_pzxthtnew:
1781  case Hexagon::C2_ccombinewf:
1782  case Hexagon::C2_ccombinewt:
1783  return true;
1784  }
1785  return false;
1786 }
1787 
1788 // FIXME - Function name and its functionality don't match.
1789 // It should be renamed to hasPredNewOpcode().
1791  if (!MI.getDesc().mayLoad() || !isPredicated(MI))
1792  return false;
1793 
1794  int PNewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
1795  // Instruction with valid predicated-new opcode can be promoted to .new.
1796  return PNewOpcode >= 0;
1797 }
1798 
1799 // Returns true if an instruction is a conditional store.
1800 //
1801 // Note: It doesn't include conditional new-value stores as they can't be
1802 // converted to .new predicate.
1804  switch (MI.getOpcode()) {
1805  default: return false;
1806  case Hexagon::S4_storeirbt_io:
1807  case Hexagon::S4_storeirbf_io:
1808  case Hexagon::S4_pstorerbt_rr:
1809  case Hexagon::S4_pstorerbf_rr:
1810  case Hexagon::S2_pstorerbt_io:
1811  case Hexagon::S2_pstorerbf_io:
1812  case Hexagon::S2_pstorerbt_pi:
1813  case Hexagon::S2_pstorerbf_pi:
1814  case Hexagon::S2_pstorerdt_io:
1815  case Hexagon::S2_pstorerdf_io:
1816  case Hexagon::S4_pstorerdt_rr:
1817  case Hexagon::S4_pstorerdf_rr:
1818  case Hexagon::S2_pstorerdt_pi:
1819  case Hexagon::S2_pstorerdf_pi:
1820  case Hexagon::S2_pstorerht_io:
1821  case Hexagon::S2_pstorerhf_io:
1822  case Hexagon::S4_storeirht_io:
1823  case Hexagon::S4_storeirhf_io:
1824  case Hexagon::S4_pstorerht_rr:
1825  case Hexagon::S4_pstorerhf_rr:
1826  case Hexagon::S2_pstorerht_pi:
1827  case Hexagon::S2_pstorerhf_pi:
1828  case Hexagon::S2_pstorerit_io:
1829  case Hexagon::S2_pstorerif_io:
1830  case Hexagon::S4_storeirit_io:
1831  case Hexagon::S4_storeirif_io:
1832  case Hexagon::S4_pstorerit_rr:
1833  case Hexagon::S4_pstorerif_rr:
1834  case Hexagon::S2_pstorerit_pi:
1835  case Hexagon::S2_pstorerif_pi:
1836 
1837  // V4 global address store before promoting to dot new.
1838  case Hexagon::S4_pstorerdt_abs:
1839  case Hexagon::S4_pstorerdf_abs:
1840  case Hexagon::S4_pstorerbt_abs:
1841  case Hexagon::S4_pstorerbf_abs:
1842  case Hexagon::S4_pstorerht_abs:
1843  case Hexagon::S4_pstorerhf_abs:
1844  case Hexagon::S4_pstorerit_abs:
1845  case Hexagon::S4_pstorerif_abs:
1846  return true;
1847 
1848  // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
1849  // from the "Conditional Store" list because a predicated new value store
1850  // would NOT be promoted to a double dot new store.
1851  // This function returns true for those stores that are predicated but not
1852  // yet promoted to predicate dot new instructions.
1853  }
1854 }
1855 
1857  switch (MI.getOpcode()) {
1858  case Hexagon::A2_tfrt:
1859  case Hexagon::A2_tfrf:
1860  case Hexagon::C2_cmoveit:
1861  case Hexagon::C2_cmoveif:
1862  case Hexagon::A2_tfrtnew:
1863  case Hexagon::A2_tfrfnew:
1864  case Hexagon::C2_cmovenewit:
1865  case Hexagon::C2_cmovenewif:
1866  case Hexagon::A2_tfrpt:
1867  case Hexagon::A2_tfrpf:
1868  return true;
1869 
1870  default:
1871  return false;
1872  }
1873  return false;
1874 }
1875 
1876 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
1877 // isFPImm and later getFPImm as well.
1879  const uint64_t F = MI.getDesc().TSFlags;
1881  if (isExtended) // Instruction must be extended.
1882  return true;
1883 
1884  unsigned isExtendable =
1886  if (!isExtendable)
1887  return false;
1888 
1889  if (MI.isCall())
1890  return false;
1891 
1892  short ExtOpNum = getCExtOpNum(MI);
1893  const MachineOperand &MO = MI.getOperand(ExtOpNum);
1894  // Use MO operand flags to determine if MO
1895  // has the HMOTF_ConstExtended flag set.
1897  return true;
1898  // If the operand is a machine basic block address and it is not marked
1899  // as extended, it does not need a constant extender.
1900  if (MO.isMBB())
1901  return false;
1902 
1903  // We could be using an instruction with an extendable immediate and shoehorn
1904  // a global address into it. If it is a global address it will be constant
1905  // extended. We do this for COMBINE.
1906  // Any of the non-immediate operand kinds that can reach this point (globals,
1907  // symbols, block addresses, jump tables, constant pools, FP immediates) is
1908  // treated as constant extended.
1909  if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
1910  MO.isJTI() || MO.isCPI() || MO.isFPImm())
1911  return true;
1912 
1913  // If the extendable operand is not 'Immediate' type, the instruction should
1914  // have 'isExtended' flag set.
1915  assert(MO.isImm() && "Extendable operand must be Immediate type");
1916 
1917  int MinValue = getMinValue(MI);
1918  int MaxValue = getMaxValue(MI);
1919  int ImmValue = MO.getImm();
1920 
1921  return (ImmValue < MinValue || ImmValue > MaxValue);
1922 }
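// Illustrative example of the final range test: A2_addi carries a signed
// 16-bit immediate field (#s16), so an add of #100000 falls outside
// [getMinValue(MI), getMaxValue(MI)] and the operand must be constant
// extended, while an add of #20000 fits and needs no extender.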
1923 
1925  switch (MI.getOpcode()) {
1926  case Hexagon::L4_return :
1927  case Hexagon::L4_return_t :
1928  case Hexagon::L4_return_f :
1929  case Hexagon::L4_return_tnew_pnt :
1930  case Hexagon::L4_return_fnew_pnt :
1931  case Hexagon::L4_return_tnew_pt :
1932  case Hexagon::L4_return_fnew_pt :
1933  return true;
1934  }
1935  return false;
1936 }
1937 
1938 // Return true when ConsMI uses a register defined by ProdMI.
1940  const MachineInstr &ConsMI) const {
1941  if (!ProdMI.getDesc().getNumDefs())
1942  return false;
1943 
1944  auto &HRI = getRegisterInfo();
1945 
1950 
1951  parseOperands(ProdMI, DefsA, UsesA);
1952  parseOperands(ConsMI, DefsB, UsesB);
1953 
1954  for (auto &RegA : DefsA)
1955  for (auto &RegB : UsesB) {
1956  // True data dependency.
1957  if (RegA == RegB)
1958  return true;
1959 
1960  if (Hexagon::DoubleRegsRegClass.contains(RegA))
1961  for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
1962  if (RegB == *SubRegs)
1963  return true;
1964 
1965  if (Hexagon::DoubleRegsRegClass.contains(RegB))
1966  for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
1967  if (RegA == *SubRegs)
1968  return true;
1969  }
1970 
1971  return false;
1972 }
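// Example of the sub-register walk above: if ProdMI defines the double
// register D0 and ConsMI uses R0 or R1 (the two halves of D0), the inner
// MCSubRegIterator loop finds the overlap and the instructions are reported
// as dependent.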
1973 
1974 // Returns true if the instruction is already a .cur.
1976  switch (MI.getOpcode()) {
1977  case Hexagon::V6_vL32b_cur_pi:
1978  case Hexagon::V6_vL32b_cur_ai:
1979  case Hexagon::V6_vL32b_cur_pi_128B:
1980  case Hexagon::V6_vL32b_cur_ai_128B:
1981  return true;
1982  }
1983  return false;
1984 }
1985 
1986 // Returns true if the instruction is a dot-new insn, whether it is a
1987 // predicated dot-new or a register (new-value) dot-new insn.
1989  if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
1990  return true;
1991 
1992  return false;
1993 }
1994 
1995 /// Symmetrical. See if these two instructions can form a duplex pair.
1997  const MachineInstr &MIb) const {
2000  return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
2001 }
2002 
2004  if (MI.mayLoad() || MI.mayStore() || MI.isCompare())
2005  return true;
2006 
2007  // Multiply
2008  unsigned SchedClass = MI.getDesc().getSchedClass();
2009  if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
2010  return true;
2011  return false;
2012 }
2013 
2014 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
2015  return (Opcode == Hexagon::ENDLOOP0 ||
2016  Opcode == Hexagon::ENDLOOP1);
2017 }
2018 
2019 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
2020  switch(OpType) {
2027  return true;
2028  default:
2029  return false;
2030  }
2031 }
2032 
2034  const MCInstrDesc &MID = MI.getDesc();
2035  const uint64_t F = MID.TSFlags;
2037  return true;
2038 
2039  // TODO: This is largely obsolete now. Will need to be removed
2040  // in subsequent patches.
2041  switch (MI.getOpcode()) {
2042  // PS_fi and PS_fia remain special cases.
2043  case Hexagon::PS_fi:
2044  case Hexagon::PS_fia:
2045  return true;
2046  default:
2047  return false;
2048  }
2049  return false;
2050 }
2051 
2052 // This returns true in two cases:
2053 // - The opcode itself indicates that this is an extended instruction.
2054 // - One of the operands has been marked with the HMOTF_ConstExtended flag.
2056  // First check if this is a permanently extended opcode.
2057  const uint64_t F = MI.getDesc().TSFlags;
2059  return true;
2060  // Use MO operand flags to determine if one of MI's operands
2061  // has the HMOTF_ConstExtended flag set.
2063  E = MI.operands_end(); I != E; ++I) {
2064  if (I->getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2065  return true;
2066  }
2067  return false;
2068 }
2069 
2071  unsigned Opcode = MI.getOpcode();
2072  const uint64_t F = get(Opcode).TSFlags;
2073  return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2074 }
2075 
2076 // No V60 HVX VMEM with A_INDIRECT.
2078  const MachineInstr &J) const {
2079  if (!isV60VectorInstruction(I))
2080  return false;
2081  if (!I.mayLoad() && !I.mayStore())
2082  return false;
2083  return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2084 }
2085 
2087  switch (MI.getOpcode()) {
2088  case Hexagon::J2_callr :
2089  case Hexagon::J2_callrf :
2090  case Hexagon::J2_callrt :
2091  case Hexagon::PS_call_nr :
2092  return true;
2093  }
2094  return false;
2095 }
2096 
2098  switch (MI.getOpcode()) {
2099  case Hexagon::L4_return :
2100  case Hexagon::L4_return_t :
2101  case Hexagon::L4_return_f :
2102  case Hexagon::L4_return_fnew_pnt :
2103  case Hexagon::L4_return_fnew_pt :
2104  case Hexagon::L4_return_tnew_pnt :
2105  case Hexagon::L4_return_tnew_pt :
2106  return true;
2107  }
2108  return false;
2109 }
2110 
2112  switch (MI.getOpcode()) {
2113  case Hexagon::J2_jumpr :
2114  case Hexagon::J2_jumprt :
2115  case Hexagon::J2_jumprf :
2116  case Hexagon::J2_jumprtnewpt :
2117  case Hexagon::J2_jumprfnewpt :
2118  case Hexagon::J2_jumprtnew :
2119  case Hexagon::J2_jumprfnew :
2120  return true;
2121  }
2122  return false;
2123 }
2124 
2125 // Return true if a given MI can accommodate the given offset.
2126 // Use an absolute estimate as opposed to the exact number.
2127 // TODO: This will need to be changed to use MC level
2128 // definition of instruction extendable field size.
2130  unsigned offset) const {
2131  // This selection of jump instructions matches what
2132  // AnalyzeBranch can parse, plus NVJ.
2133  if (isNewValueJump(MI)) // r9:2
2134  return isInt<11>(offset);
2135 
2136  switch (MI.getOpcode()) {
2137  // Jumps to an address conditional on a register value are still missing.
2138  default:
2139  return false;
2140  case Hexagon::J2_jump: // bits<24> dst; // r22:2
2141  case Hexagon::J2_call:
2142  case Hexagon::PS_call_nr:
2143  return isInt<24>(offset);
2144  case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2145  case Hexagon::J2_jumpf:
2146  case Hexagon::J2_jumptnew:
2147  case Hexagon::J2_jumptnewpt:
2148  case Hexagon::J2_jumpfnew:
2149  case Hexagon::J2_jumpfnewpt:
2150  case Hexagon::J2_callt:
2151  case Hexagon::J2_callf:
2152  return isInt<17>(offset);
2153  case Hexagon::J2_loop0i:
2154  case Hexagon::J2_loop0iext:
2155  case Hexagon::J2_loop0r:
2156  case Hexagon::J2_loop0rext:
2157  case Hexagon::J2_loop1i:
2158  case Hexagon::J2_loop1iext:
2159  case Hexagon::J2_loop1r:
2160  case Hexagon::J2_loop1rext:
2161  return isInt<9>(offset);
2162  // TODO: Add all the compound branches here. Can we do this in Relation model?
2163  case Hexagon::J4_cmpeqi_tp0_jump_nt:
2164  case Hexagon::J4_cmpeqi_tp1_jump_nt:
2165  return isInt<11>(offset);
2166  }
2167 }
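// For reference, the templates used above translate to the following signed
// byte ranges: isInt<24> accepts [-8388608, 8388607] for J2_jump and calls,
// isInt<17> accepts [-65536, 65535] for conditional jumps and calls,
// isInt<11> accepts [-1024, 1023] for new-value and compound jumps, and
// isInt<9> accepts [-256, 255] for the loop instructions.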
2168 
2170  const MachineInstr &ESMI) const {
2171  bool isLate = isLateResultInstr(LRMI);
2172  bool isEarly = isEarlySourceInstr(ESMI);
2173 
2174  DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
2175  DEBUG(LRMI.dump());
2176  DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
2177  DEBUG(ESMI.dump());
2178 
2179  if (isLate && isEarly) {
2180  DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
2181  return true;
2182  }
2183 
2184  return false;
2185 }
2186 
2188  switch (MI.getOpcode()) {
2189  case TargetOpcode::EXTRACT_SUBREG:
2190  case TargetOpcode::INSERT_SUBREG:
2191  case TargetOpcode::SUBREG_TO_REG:
2192  case TargetOpcode::REG_SEQUENCE:
2193  case TargetOpcode::IMPLICIT_DEF:
2194  case TargetOpcode::COPY:
2196  case TargetOpcode::PHI:
2197  return false;
2198  default:
2199  break;
2200  }
2201 
2202  unsigned SchedClass = MI.getDesc().getSchedClass();
2203 
2204  switch (SchedClass) {
2205  case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
2206  case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
2207  case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
2208  case Hexagon::Sched::ALU64_tc_1_SLOT23:
2209  case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
2210  case Hexagon::Sched::S_2op_tc_1_SLOT23:
2211  case Hexagon::Sched::S_3op_tc_1_SLOT23:
2212  case Hexagon::Sched::V2LDST_tc_ld_SLOT01:
2213  case Hexagon::Sched::V2LDST_tc_st_SLOT0:
2214  case Hexagon::Sched::V2LDST_tc_st_SLOT01:
2215  case Hexagon::Sched::V4LDST_tc_ld_SLOT01:
2216  case Hexagon::Sched::V4LDST_tc_st_SLOT0:
2217  case Hexagon::Sched::V4LDST_tc_st_SLOT01:
2218  return false;
2219  }
2220  return true;
2221 }
2222 
2224  // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
2225  // resource, but all operands can be received late like an ALU instruction.
2226  return MI.getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
2227 }
2228 
2230  unsigned Opcode = MI.getOpcode();
2231  return Opcode == Hexagon::J2_loop0i ||
2232  Opcode == Hexagon::J2_loop0r ||
2233  Opcode == Hexagon::J2_loop0iext ||
2234  Opcode == Hexagon::J2_loop0rext ||
2235  Opcode == Hexagon::J2_loop1i ||
2236  Opcode == Hexagon::J2_loop1r ||
2237  Opcode == Hexagon::J2_loop1iext ||
2238  Opcode == Hexagon::J2_loop1rext;
2239 }
2240 
2242  switch (MI.getOpcode()) {
2243  default: return false;
2244  case Hexagon::L4_iadd_memopw_io :
2245  case Hexagon::L4_isub_memopw_io :
2246  case Hexagon::L4_add_memopw_io :
2247  case Hexagon::L4_sub_memopw_io :
2248  case Hexagon::L4_and_memopw_io :
2249  case Hexagon::L4_or_memopw_io :
2250  case Hexagon::L4_iadd_memoph_io :
2251  case Hexagon::L4_isub_memoph_io :
2252  case Hexagon::L4_add_memoph_io :
2253  case Hexagon::L4_sub_memoph_io :
2254  case Hexagon::L4_and_memoph_io :
2255  case Hexagon::L4_or_memoph_io :
2256  case Hexagon::L4_iadd_memopb_io :
2257  case Hexagon::L4_isub_memopb_io :
2258  case Hexagon::L4_add_memopb_io :
2259  case Hexagon::L4_sub_memopb_io :
2260  case Hexagon::L4_and_memopb_io :
2261  case Hexagon::L4_or_memopb_io :
2262  case Hexagon::L4_ior_memopb_io:
2263  case Hexagon::L4_ior_memoph_io:
2264  case Hexagon::L4_ior_memopw_io:
2265  case Hexagon::L4_iand_memopb_io:
2266  case Hexagon::L4_iand_memoph_io:
2267  case Hexagon::L4_iand_memopw_io:
2268  return true;
2269  }
2270  return false;
2271 }
2272 
2274  const uint64_t F = MI.getDesc().TSFlags;
2276 }
2277 
2278 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2279  const uint64_t F = get(Opcode).TSFlags;
2281 }
2282 
2284  return isNewValueJump(MI) || isNewValueStore(MI);
2285 }
2286 
2288  return isNewValue(MI) && MI.isBranch();
2289 }
2290 
2291 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2292  return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2293 }
2294 
2296  const uint64_t F = MI.getDesc().TSFlags;
2298 }
2299 
2300 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2301  const uint64_t F = get(Opcode).TSFlags;
2303 }
2304 
2305 // Returns true if a particular operand is extendable for an instruction.
2307  unsigned OperandNum) const {
2308  const uint64_t F = MI.getDesc().TSFlags;
2310  == OperandNum;
2311 }
2312 
2314  const uint64_t F = MI.getDesc().TSFlags;
2315  assert(isPredicated(MI));
2317 }
2318 
2319 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2320  const uint64_t F = get(Opcode).TSFlags;
2321  assert(isPredicated(Opcode));
2323 }
2324 
2326  const uint64_t F = MI.getDesc().TSFlags;
2327  return !((F >> HexagonII::PredicatedFalsePos) &
2329 }
2330 
2331 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2332  const uint64_t F = get(Opcode).TSFlags;
2333  // Make sure that the instruction is predicated.
2335  return !((F >> HexagonII::PredicatedFalsePos) &
2337 }
2338 
2339 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2340  const uint64_t F = get(Opcode).TSFlags;
2342 }
2343 
2344 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2345  const uint64_t F = get(Opcode).TSFlags;
2347 }
2348 
2349 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2350  const uint64_t F = get(Opcode).TSFlags;
2351  assert(get(Opcode).isBranch() &&
2352  (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2353  return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2354 }
2355 
2357  return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2358  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2359  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2360  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2361 }
2362 
2364  switch (MI.getOpcode()) {
2365  // Byte
2366  case Hexagon::L2_loadrb_io:
2367  case Hexagon::L4_loadrb_ur:
2368  case Hexagon::L4_loadrb_ap:
2369  case Hexagon::L2_loadrb_pr:
2370  case Hexagon::L2_loadrb_pbr:
2371  case Hexagon::L2_loadrb_pi:
2372  case Hexagon::L2_loadrb_pci:
2373  case Hexagon::L2_loadrb_pcr:
2374  case Hexagon::L2_loadbsw2_io:
2375  case Hexagon::L4_loadbsw2_ur:
2376  case Hexagon::L4_loadbsw2_ap:
2377  case Hexagon::L2_loadbsw2_pr:
2378  case Hexagon::L2_loadbsw2_pbr:
2379  case Hexagon::L2_loadbsw2_pi:
2380  case Hexagon::L2_loadbsw2_pci:
2381  case Hexagon::L2_loadbsw2_pcr:
2382  case Hexagon::L2_loadbsw4_io:
2383  case Hexagon::L4_loadbsw4_ur:
2384  case Hexagon::L4_loadbsw4_ap:
2385  case Hexagon::L2_loadbsw4_pr:
2386  case Hexagon::L2_loadbsw4_pbr:
2387  case Hexagon::L2_loadbsw4_pi:
2388  case Hexagon::L2_loadbsw4_pci:
2389  case Hexagon::L2_loadbsw4_pcr:
2390  case Hexagon::L4_loadrb_rr:
2391  case Hexagon::L2_ploadrbt_io:
2392  case Hexagon::L2_ploadrbt_pi:
2393  case Hexagon::L2_ploadrbf_io:
2394  case Hexagon::L2_ploadrbf_pi:
2395  case Hexagon::L2_ploadrbtnew_io:
2396  case Hexagon::L2_ploadrbfnew_io:
2397  case Hexagon::L4_ploadrbt_rr:
2398  case Hexagon::L4_ploadrbf_rr:
2399  case Hexagon::L4_ploadrbtnew_rr:
2400  case Hexagon::L4_ploadrbfnew_rr:
2401  case Hexagon::L2_ploadrbtnew_pi:
2402  case Hexagon::L2_ploadrbfnew_pi:
2403  case Hexagon::L4_ploadrbt_abs:
2404  case Hexagon::L4_ploadrbf_abs:
2405  case Hexagon::L4_ploadrbtnew_abs:
2406  case Hexagon::L4_ploadrbfnew_abs:
2407  case Hexagon::L2_loadrbgp:
2408  // Half
2409  case Hexagon::L2_loadrh_io:
2410  case Hexagon::L4_loadrh_ur:
2411  case Hexagon::L4_loadrh_ap:
2412  case Hexagon::L2_loadrh_pr:
2413  case Hexagon::L2_loadrh_pbr:
2414  case Hexagon::L2_loadrh_pi:
2415  case Hexagon::L2_loadrh_pci:
2416  case Hexagon::L2_loadrh_pcr:
2417  case Hexagon::L4_loadrh_rr:
2418  case Hexagon::L2_ploadrht_io:
2419  case Hexagon::L2_ploadrht_pi:
2420  case Hexagon::L2_ploadrhf_io:
2421  case Hexagon::L2_ploadrhf_pi:
2422  case Hexagon::L2_ploadrhtnew_io:
2423  case Hexagon::L2_ploadrhfnew_io:
2424  case Hexagon::L4_ploadrht_rr:
2425  case Hexagon::L4_ploadrhf_rr:
2426  case Hexagon::L4_ploadrhtnew_rr:
2427  case Hexagon::L4_ploadrhfnew_rr:
2428  case Hexagon::L2_ploadrhtnew_pi:
2429  case Hexagon::L2_ploadrhfnew_pi:
2430  case Hexagon::L4_ploadrht_abs:
2431  case Hexagon::L4_ploadrhf_abs:
2432  case Hexagon::L4_ploadrhtnew_abs:
2433  case Hexagon::L4_ploadrhfnew_abs:
2434  case Hexagon::L2_loadrhgp:
2435  return true;
2436  default:
2437  return false;
2438  }
2439 }
2440 
2442  const uint64_t F = MI.getDesc().TSFlags;
2443  return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2444 }
2445 
2447  switch (MI.getOpcode()) {
2448  case Hexagon::STriw_pred :
2449  case Hexagon::LDriw_pred :
2450  return true;
2451  default:
2452  return false;
2453  }
2454 }
2455 
2457  if (!MI.isBranch())
2458  return false;
2459 
2460  for (auto &Op : MI.operands())
2461  if (Op.isGlobal() || Op.isSymbol())
2462  return true;
2463  return false;
2464 }
2465 
2466 // Returns true when SU has a timing class TC1.
2468  unsigned SchedClass = MI.getDesc().getSchedClass();
2469  switch (SchedClass) {
2470  case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
2471  case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
2472  case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
2473  case Hexagon::Sched::ALU64_tc_1_SLOT23:
2474  case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
2475  //case Hexagon::Sched::M_tc_1_SLOT23:
2476  case Hexagon::Sched::S_2op_tc_1_SLOT23:
2477  case Hexagon::Sched::S_3op_tc_1_SLOT23:
2478  return true;
2479 
2480  default:
2481  return false;
2482  }
2483 }
2484 
2486  unsigned SchedClass = MI.getDesc().getSchedClass();
2487  switch (SchedClass) {
2488  case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
2489  case Hexagon::Sched::ALU64_tc_2_SLOT23:
2490  case Hexagon::Sched::CR_tc_2_SLOT3:
2491  case Hexagon::Sched::M_tc_2_SLOT23:
2492  case Hexagon::Sched::S_2op_tc_2_SLOT23:
2493  case Hexagon::Sched::S_3op_tc_2_SLOT23:
2494  return true;
2495 
2496  default:
2497  return false;
2498  }
2499 }
2500 
2502  unsigned SchedClass = MI.getDesc().getSchedClass();
2503  switch (SchedClass) {
2504  case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
2505  case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
2506  case Hexagon::Sched::ALU64_tc_2early_SLOT23:
2507  case Hexagon::Sched::CR_tc_2early_SLOT23:
2508  case Hexagon::Sched::CR_tc_2early_SLOT3:
2509  case Hexagon::Sched::J_tc_2early_SLOT0123:
2510  case Hexagon::Sched::J_tc_2early_SLOT2:
2511  case Hexagon::Sched::J_tc_2early_SLOT23:
2512  case Hexagon::Sched::S_2op_tc_2early_SLOT23:
2513  case Hexagon::Sched::S_3op_tc_2early_SLOT23:
2514  return true;
2515 
2516  default:
2517  return false;
2518  }
2519 }
2520 
2522  unsigned SchedClass = MI.getDesc().getSchedClass();
2523  return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
2524 }
2525 
2526 // Schedule this ASAP.
2528  const MachineInstr &MI2) const {
2529  if (mayBeCurLoad(MI1)) {
2530  // if (result of SU is used in Next) return true;
2531  unsigned DstReg = MI1.getOperand(0).getReg();
2532  int N = MI2.getNumOperands();
2533  for (int I = 0; I < N; I++)
2534  if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2535  return true;
2536  }
2537  if (mayBeNewStore(MI2))
2538  if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2539  if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2540  MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2541  return true;
2542  return false;
2543 }
2544 
2546  const uint64_t V = getType(MI);
2548 }
2549 
2550 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
2551 //
2552 bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, const int Offset) const {
2553  if (VT == MVT::v16i32 || VT == MVT::v8i64 ||
2554  VT == MVT::v32i16 || VT == MVT::v64i8) {
2555  return (Offset >= Hexagon_MEMV_AUTOINC_MIN &&
2556  Offset <= Hexagon_MEMV_AUTOINC_MAX &&
2557  (Offset & 0x3f) == 0);
2558  }
2559  // 128B
2560  if (VT == MVT::v32i32 || VT == MVT::v16i64 ||
2561  VT == MVT::v64i16 || VT == MVT::v128i8) {
2562  return (Offset >= Hexagon_MEMV_AUTOINC_MIN_128B &&
2563  Offset <= Hexagon_MEMV_AUTOINC_MAX_128B &&
2564  (Offset & 0x7f) == 0);
2565  }
2566  if (VT == MVT::i64) {
2567  return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
2568  Offset <= Hexagon_MEMD_AUTOINC_MAX &&
2569  (Offset & 0x7) == 0);
2570  }
2571  if (VT == MVT::i32) {
2572  return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
2573  Offset <= Hexagon_MEMW_AUTOINC_MAX &&
2574  (Offset & 0x3) == 0);
2575  }
2576  if (VT == MVT::i16) {
2577  return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
2578  Offset <= Hexagon_MEMH_AUTOINC_MAX &&
2579  (Offset & 0x1) == 0);
2580  }
2581  if (VT == MVT::i8) {
2582  return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
2583  Offset <= Hexagon_MEMB_AUTOINC_MAX);
2584  }
2585  llvm_unreachable("Not an auto-inc opc!");
2586 }
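// Illustrative use of the checks above: for an MVT::i32 access, an auto
// increment of 8 is accepted (it lies within the word auto-increment range
// and (8 & 0x3) == 0), while an increment of 6 is rejected because it is not
// word aligned.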
2587 
2588 bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2589  bool Extend) const {
2590  // This function is to check whether the "Offset" is in the correct range of
2591  // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
2592  // inserted to calculate the final address. Due to this reason, the function
2593  // assumes that the "Offset" has correct alignment.
2594  // We used to assert if the offset was not properly aligned; however,
2595  // there are cases where a misaligned pointer recast can cause this
2596  // problem, and we need to allow for it. The front end warns of such
2597  // misalignments with respect to load size.
2598 
2599  switch (Opcode) {
2600  case Hexagon::PS_vstorerq_ai:
2601  case Hexagon::PS_vstorerw_ai:
2602  case Hexagon::PS_vloadrq_ai:
2603  case Hexagon::PS_vloadrw_ai:
2604  case Hexagon::V6_vL32b_ai:
2605  case Hexagon::V6_vS32b_ai:
2606  case Hexagon::V6_vL32Ub_ai:
2607  case Hexagon::V6_vS32Ub_ai:
2608  return (Offset >= Hexagon_MEMV_OFFSET_MIN) &&
2609  (Offset <= Hexagon_MEMV_OFFSET_MAX);
2610 
2611  case Hexagon::PS_vstorerq_ai_128B:
2612  case Hexagon::PS_vstorerw_ai_128B:
2613  case Hexagon::PS_vloadrq_ai_128B:
2614  case Hexagon::PS_vloadrw_ai_128B:
2615  case Hexagon::V6_vL32b_ai_128B:
2616  case Hexagon::V6_vS32b_ai_128B:
2617  case Hexagon::V6_vL32Ub_ai_128B:
2618  case Hexagon::V6_vS32Ub_ai_128B:
2619  return (Offset >= Hexagon_MEMV_OFFSET_MIN_128B) &&
2620  (Offset <= Hexagon_MEMV_OFFSET_MAX_128B);
2621 
2622  case Hexagon::J2_loop0i:
2623  case Hexagon::J2_loop1i:
2624  return isUInt<10>(Offset);
2625 
2626  case Hexagon::S4_storeirb_io:
2627  case Hexagon::S4_storeirbt_io:
2628  case Hexagon::S4_storeirbf_io:
2629  return isUInt<6>(Offset);
2630 
2631  case Hexagon::S4_storeirh_io:
2632  case Hexagon::S4_storeirht_io:
2633  case Hexagon::S4_storeirhf_io:
2634  return isShiftedUInt<6,1>(Offset);
2635 
2636  case Hexagon::S4_storeiri_io:
2637  case Hexagon::S4_storeirit_io:
2638  case Hexagon::S4_storeirif_io:
2639  return isShiftedUInt<6,2>(Offset);
2640  }
2641 
2642  if (Extend)
2643  return true;
2644 
2645  switch (Opcode) {
2646  case Hexagon::L2_loadri_io:
2647  case Hexagon::S2_storeri_io:
2648  return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2649  (Offset <= Hexagon_MEMW_OFFSET_MAX);
2650 
2651  case Hexagon::L2_loadrd_io:
2652  case Hexagon::S2_storerd_io:
2653  return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2654  (Offset <= Hexagon_MEMD_OFFSET_MAX);
2655 
2656  case Hexagon::L2_loadrh_io:
2657  case Hexagon::L2_loadruh_io:
2658  case Hexagon::S2_storerh_io:
2659  return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2660  (Offset <= Hexagon_MEMH_OFFSET_MAX);
2661 
2662  case Hexagon::L2_loadrb_io:
2663  case Hexagon::L2_loadrub_io:
2664  case Hexagon::S2_storerb_io:
2665  return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2666  (Offset <= Hexagon_MEMB_OFFSET_MAX);
2667 
2668  case Hexagon::A2_addi:
2669  return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2670  (Offset <= Hexagon_ADDI_OFFSET_MAX);
2671 
2672  case Hexagon::L4_iadd_memopw_io :
2673  case Hexagon::L4_isub_memopw_io :
2674  case Hexagon::L4_add_memopw_io :
2675  case Hexagon::L4_sub_memopw_io :
2676  case Hexagon::L4_and_memopw_io :
2677  case Hexagon::L4_or_memopw_io :
2678  return (0 <= Offset && Offset <= 255);
2679 
2680  case Hexagon::L4_iadd_memoph_io :
2681  case Hexagon::L4_isub_memoph_io :
2682  case Hexagon::L4_add_memoph_io :
2683  case Hexagon::L4_sub_memoph_io :
2684  case Hexagon::L4_and_memoph_io :
2685  case Hexagon::L4_or_memoph_io :
2686  return (0 <= Offset && Offset <= 127);
2687 
2688  case Hexagon::L4_iadd_memopb_io :
2689  case Hexagon::L4_isub_memopb_io :
2690  case Hexagon::L4_add_memopb_io :
2691  case Hexagon::L4_sub_memopb_io :
2692  case Hexagon::L4_and_memopb_io :
2693  case Hexagon::L4_or_memopb_io :
2694  return (0 <= Offset && Offset <= 63);
2695 
2696  // LDriw_xxx and STriw_xxx are pseudo operations, so they have to accept an
2697  // offset of any size. A later pass knows how to handle it.
2698  case Hexagon::STriw_pred:
2699  case Hexagon::LDriw_pred:
2700  case Hexagon::STriw_mod:
2701  case Hexagon::LDriw_mod:
2702  return true;
2703 
2704  case Hexagon::PS_fi:
2705  case Hexagon::PS_fia:
2706  case Hexagon::INLINEASM:
2707  return true;
2708 
2709  case Hexagon::L2_ploadrbt_io:
2710  case Hexagon::L2_ploadrbf_io:
2711  case Hexagon::L2_ploadrubt_io:
2712  case Hexagon::L2_ploadrubf_io:
2713  case Hexagon::S2_pstorerbt_io:
2714  case Hexagon::S2_pstorerbf_io:
2715  return isUInt<6>(Offset);
2716 
2717  case Hexagon::L2_ploadrht_io:
2718  case Hexagon::L2_ploadrhf_io:
2719  case Hexagon::L2_ploadruht_io:
2720  case Hexagon::L2_ploadruhf_io:
2721  case Hexagon::S2_pstorerht_io:
2722  case Hexagon::S2_pstorerhf_io:
2723  return isShiftedUInt<6,1>(Offset);
2724 
2725  case Hexagon::L2_ploadrit_io:
2726  case Hexagon::L2_ploadrif_io:
2727  case Hexagon::S2_pstorerit_io:
2728  case Hexagon::S2_pstorerif_io:
2729  return isShiftedUInt<6,2>(Offset);
2730 
2731  case Hexagon::L2_ploadrdt_io:
2732  case Hexagon::L2_ploadrdf_io:
2733  case Hexagon::S2_pstorerdt_io:
2734  case Hexagon::S2_pstorerdf_io:
2735  return isShiftedUInt<6,3>(Offset);
2736  } // switch
2737 
2738  llvm_unreachable("No offset range is defined for this opcode. "
2739  "Please define it in the above switch statement!");
2740 }
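// Worked example for the shifted-immediate checks above:
// isShiftedUInt<6,1>(Offset) (used for the halfword forms) accepts even
// offsets from 0 to 126, so isShiftedUInt<6,1>(64) is true while
// isShiftedUInt<6,1>(65) and isShiftedUInt<6,1>(128) are false;
// isShiftedUInt<6,2> similarly accepts multiples of 4 from 0 to 252.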
2741 
2743  return isV60VectorInstruction(MI) && isAccumulator(MI);
2744 }
2745 
2747  const uint64_t F = get(MI.getOpcode()).TSFlags;
2748  const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2749  return
2750  V == HexagonII::TypeCVI_VA ||
2752 }
2753 
2755  const MachineInstr &ConsMI) const {
2756  if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2757  return true;
2758 
2759  if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2760  return true;
2761 
2762  if (mayBeNewStore(ConsMI))
2763  return true;
2764 
2765  return false;
2766 }
2767 
2769  switch (MI.getOpcode()) {
2770  // Byte
2771  case Hexagon::L2_loadrub_io:
2772  case Hexagon::L4_loadrub_ur:
2773  case Hexagon::L4_loadrub_ap:
2774  case Hexagon::L2_loadrub_pr:
2775  case Hexagon::L2_loadrub_pbr:
2776  case Hexagon::L2_loadrub_pi:
2777  case Hexagon::L2_loadrub_pci:
2778  case Hexagon::L2_loadrub_pcr:
2779  case Hexagon::L2_loadbzw2_io:
2780  case Hexagon::L4_loadbzw2_ur:
2781  case Hexagon::L4_loadbzw2_ap:
2782  case Hexagon::L2_loadbzw2_pr:
2783  case Hexagon::L2_loadbzw2_pbr:
2784  case Hexagon::L2_loadbzw2_pi:
2785  case Hexagon::L2_loadbzw2_pci:
2786  case Hexagon::L2_loadbzw2_pcr:
2787  case Hexagon::L2_loadbzw4_io:
2788  case Hexagon::L4_loadbzw4_ur:
2789  case Hexagon::L4_loadbzw4_ap:
2790  case Hexagon::L2_loadbzw4_pr:
2791  case Hexagon::L2_loadbzw4_pbr:
2792  case Hexagon::L2_loadbzw4_pi:
2793  case Hexagon::L2_loadbzw4_pci:
2794  case Hexagon::L2_loadbzw4_pcr:
2795  case Hexagon::L4_loadrub_rr:
2796  case Hexagon::L2_ploadrubt_io:
2797  case Hexagon::L2_ploadrubt_pi:
2798  case Hexagon::L2_ploadrubf_io:
2799  case Hexagon::L2_ploadrubf_pi:
2800  case Hexagon::L2_ploadrubtnew_io:
2801  case Hexagon::L2_ploadrubfnew_io:
2802  case Hexagon::L4_ploadrubt_rr:
2803  case Hexagon::L4_ploadrubf_rr:
2804  case Hexagon::L4_ploadrubtnew_rr:
2805  case Hexagon::L4_ploadrubfnew_rr:
2806  case Hexagon::L2_ploadrubtnew_pi:
2807  case Hexagon::L2_ploadrubfnew_pi:
2808  case Hexagon::L4_ploadrubt_abs:
2809  case Hexagon::L4_ploadrubf_abs:
2810  case Hexagon::L4_ploadrubtnew_abs:
2811  case Hexagon::L4_ploadrubfnew_abs:
2812  case Hexagon::L2_loadrubgp:
2813  // Half
2814  case Hexagon::L2_loadruh_io:
2815  case Hexagon::L4_loadruh_ur:
2816  case Hexagon::L4_loadruh_ap:
2817  case Hexagon::L2_loadruh_pr:
2818  case Hexagon::L2_loadruh_pbr:
2819  case Hexagon::L2_loadruh_pi:
2820  case Hexagon::L2_loadruh_pci:
2821  case Hexagon::L2_loadruh_pcr:
2822  case Hexagon::L4_loadruh_rr:
2823  case Hexagon::L2_ploadruht_io:
2824  case Hexagon::L2_ploadruht_pi:
2825  case Hexagon::L2_ploadruhf_io:
2826  case Hexagon::L2_ploadruhf_pi:
2827  case Hexagon::L2_ploadruhtnew_io:
2828  case Hexagon::L2_ploadruhfnew_io:
2829  case Hexagon::L4_ploadruht_rr:
2830  case Hexagon::L4_ploadruhf_rr:
2831  case Hexagon::L4_ploadruhtnew_rr:
2832  case Hexagon::L4_ploadruhfnew_rr:
2833  case Hexagon::L2_ploadruhtnew_pi:
2834  case Hexagon::L2_ploadruhfnew_pi:
2835  case Hexagon::L4_ploadruht_abs:
2836  case Hexagon::L4_ploadruhf_abs:
2837  case Hexagon::L4_ploadruhtnew_abs:
2838  case Hexagon::L4_ploadruhfnew_abs:
2839  case Hexagon::L2_loadruhgp:
2840  return true;
2841  default:
2842  return false;
2843  }
2844 }
2845 
2846 // Returns true if extra latency should be added between these instructions.
2848  const MachineInstr &MI2) const {
2850  if (!isVecUsableNextPacket(MI1, MI2))
2851  return true;
2852  return false;
2853 }
2854 
2855 /// \brief Get the base register and byte offset of a load/store instr.
2857  unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI)
2858  const {
2859  unsigned AccessSize = 0;
2860  int OffsetVal = 0;
2861  BaseReg = getBaseAndOffset(LdSt, OffsetVal, AccessSize);
2862  Offset = OffsetVal;
2863  return BaseReg != 0;
2864 }
2865 
2866 /// \brief Can these instructions execute at the same time in a bundle?
2868  const MachineInstr &Second) const {
2869  if (DisableNVSchedule)
2870  return false;
2871  if (mayBeNewStore(Second)) {
2872  // Make sure the definition of the first instruction is the value being
2873  // stored.
2874  const MachineOperand &Stored =
2875  Second.getOperand(Second.getNumOperands() - 1);
2876  if (!Stored.isReg())
2877  return false;
2878  for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
2879  const MachineOperand &Op = First.getOperand(i);
2880  if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
2881  return true;
2882  }
2883  }
2884  return false;
2885 }
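// Illustrative case for the check above: if First is "r2 = add(r3,r4)" and
// Second is a word store of r2 that may be promoted to a new-value store,
// the stored value (the last operand of Second) is defined by First, so the
// two can execute in the same bundle with the store taking r2.new.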
2886 
2888  unsigned Opc = CallMI.getOpcode();
2889  return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
2890 }
2891 
2893  for (auto &I : *B)
2894  if (I.isEHLabel())
2895  return true;
2896  return false;
2897 }
2898 
2899 // Returns true if an instruction can be converted into a non-extended
2900 // equivalent instruction.
2902  short NonExtOpcode;
2903  // Check if the instruction has a register form that uses a register in place
2904  // of the extended operand; if so, it has a non-extended equivalent.
2905  if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
2906  return true;
2907 
2908  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
2909  // Check addressing mode and retrieve non-ext equivalent instruction.
2910 
2911  switch (getAddrMode(MI)) {
2912  case HexagonII::Absolute :
2913  // Load/store with absolute addressing mode can be converted into
2914  // base+offset mode.
2915  NonExtOpcode = Hexagon::getBaseWithImmOffset(MI.getOpcode());
2916  break;
2918  // Load/store with base+offset addressing mode can be converted into
2919  // base+register offset addressing mode. However, the left-shift operand
2920  // should be set to 0.
2921  NonExtOpcode = Hexagon::getBaseWithRegOffset(MI.getOpcode());
2922  break;
2924  NonExtOpcode = Hexagon::getRegShlForm(MI.getOpcode());
2925  break;
2926  default:
2927  return false;
2928  }
2929  if (NonExtOpcode < 0)
2930  return false;
2931  return true;
2932  }
2933  return false;
2934 }
2935 
2937  return Hexagon::getRealHWInstr(MI.getOpcode(),
2938  Hexagon::InstrType_Pseudo) >= 0;
2939 }
2940 
2942  const {
2944  while (I != E) {
2945  if (I->isBarrier())
2946  return true;
2947  ++I;
2948  }
2949  return false;
2950 }
2951 
2952 // Returns true if a load insn can be promoted to a .cur load.
2954  auto &HST = MI.getParent()->getParent()->getSubtarget<HexagonSubtarget>();
2955  const uint64_t F = MI.getDesc().TSFlags;
2957  HST.hasV60TOps();
2958 }
2959 
2960 // Returns true if a store insn can be promoted to a new-value store.
2962  const uint64_t F = MI.getDesc().TSFlags;
2964 }
2965 
2967  const MachineInstr &ConsMI) const {
2968  // There is no stall when ProdMI is not a V60 vector.
2969  if (!isV60VectorInstruction(ProdMI))
2970  return false;
2971 
2972  // There is no stall when ProdMI and ConsMI are not dependent.
2973  if (!isDependent(ProdMI, ConsMI))
2974  return false;
2975 
2976  // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
2977  // are scheduled in consecutive packets.
2978  if (isVecUsableNextPacket(ProdMI, ConsMI))
2979  return false;
2980 
2981  return true;
2982 }
2983 
2986  // There is no stall when I is not a V60 vector.
2987  if (!isV60VectorInstruction(MI))
2988  return false;
2989 
2991  MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
2992 
2993  if (!MII->isBundle()) {
2994  const MachineInstr &J = *MII;
2995  if (!isV60VectorInstruction(J))
2996  return false;
2997  else if (isVecUsableNextPacket(J, MI))
2998  return false;
2999  return true;
3000  }
3001 
3002  for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3003  const MachineInstr &J = *MII;
3004  if (producesStall(J, MI))
3005  return true;
3006  }
3007  return false;
3008 }
3009 
3011  unsigned PredReg) const {
3012  for (unsigned opNum = 0; opNum < MI.getNumOperands(); opNum++) {
3013  const MachineOperand &MO = MI.getOperand(opNum);
3014  if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3015  return false; // Predicate register must be explicitly defined.
3016  }
3017 
3018  // The Hexagon Programmer's Reference says that decbin, memw_locked, and
3019  // memd_locked cannot be used as .new either,
3020  // but we don't seem to have these instructions defined.
3021  return MI.getOpcode() != Hexagon::A4_tlbmatch;
3022 }
3023 
3024 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3025  return (Opcode == Hexagon::J2_jumpt) ||
3026  (Opcode == Hexagon::J2_jumpf) ||
3027  (Opcode == Hexagon::J2_jumptnew) ||
3028  (Opcode == Hexagon::J2_jumpfnew) ||
3029  (Opcode == Hexagon::J2_jumptnewpt) ||
3030  (Opcode == Hexagon::J2_jumpfnewpt);
3031 }
3032 
3034  if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3035  return false;
3036  return !isPredicatedTrue(Cond[0].getImm());
3037 }
3038 
3040  return Hexagon::getAbsoluteForm(MI.getOpcode());
3041 }
3042 
3044  const uint64_t F = MI.getDesc().TSFlags;
3046 }
3047 
3048 // Returns the base register in a memory access (load/store). The offset is
3049 // returned in Offset and the access size is returned in AccessSize.
3051  int &Offset, unsigned &AccessSize) const {
3052  // Return if it is not a base+offset type instruction or a MemOp.
3055  !isMemOp(MI) && !isPostIncrement(MI))
3056  return 0;
3057 
3058  // Since it is a memory access instruction, getMemAccessSize() should never
3059  // return 0.
3060  assert (getMemAccessSize(MI) &&
3061  "BaseImmOffset or BaseLongOffset or MemOp without accessSize");
3062 
3063  // Return values of getMemAccessSize() are:
3064  // 0 - checked by the assert above;
3065  // 1, 2, 3, 4 and 7, 8 - the statement below is correct for all of these.
3066  // MemAccessSize is represented as 1+log2(N), where N is the size in bytes.
3067  AccessSize = (1U << (getMemAccessSize(MI) - 1));
3068 
3069  unsigned basePos = 0, offsetPos = 0;
3070  if (!getBaseAndOffsetPosition(MI, basePos, offsetPos))
3071  return 0;
3072 
3073  // Post increment updates its EA after the mem access,
3074  // so we need to treat its offset as zero.
3075  if (isPostIncrement(MI))
3076  Offset = 0;
3077  else {
3078  Offset = MI.getOperand(offsetPos).getImm();
3079  }
3080 
3081  return MI.getOperand(basePos).getReg();
3082 }
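// Worked example of the access-size decoding above: an encoded
// getMemAccessSize() value of 3 (word access) yields AccessSize =
// 1 << (3 - 1) = 4 bytes, and a value of 4 (double word) yields 8 bytes.
// For post-increment forms the returned Offset is 0, since the increment is
// applied only after the access.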
3083 
3084 /// Return the position of the base and offset operands for this instruction.
3086  unsigned &BasePos, unsigned &OffsetPos) const {
3087  // Deal with memops first.
3088  if (isMemOp(MI)) {
3089  BasePos = 0;
3090  OffsetPos = 1;
3091  } else if (MI.mayStore()) {
3092  BasePos = 0;
3093  OffsetPos = 1;
3094  } else if (MI.mayLoad()) {
3095  BasePos = 1;
3096  OffsetPos = 2;
3097  } else
3098  return false;
3099 
3100  if (isPredicated(MI)) {
3101  BasePos++;
3102  OffsetPos++;
3103  }
3104  if (isPostIncrement(MI)) {
3105  BasePos++;
3106  OffsetPos++;
3107  }
3108 
3109  if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3110  return false;
3111 
3112  return true;
3113 }
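// Trace of the position bookkeeping above for a predicated post-increment
// store: the store starts at BasePos = 0 / OffsetPos = 1, the predicate bumps
// both to 1 / 2, and the post-increment bumps them again to 2 / 3, which are
// then verified to be a register and an immediate respectively.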
3114 
3115 // Inserts branching instructions in reverse order of their occurrence.
3116 // e.g. jump_t t1 (i1)
3117 // jump t2 (i2)
3118 // Jumpers = {i2, i1}
3120  MachineBasicBlock& MBB) const {
3122  // If the block has no terminators, it just falls into the block after it.
3124  if (I == MBB.instr_begin())
3125  return Jumpers;
3126 
3127  // A basic block may look like this:
3128  //
3129  // [ insn
3130  // EH_LABEL
3131  // insn
3132  // insn
3133  // insn
3134  // EH_LABEL
3135  // insn ]
3136  //
3137  // It has two successors but does not have a terminator.
3138  // We don't know how to handle such a block.
3139  do {
3140  --I;
3141  if (I->isEHLabel())
3142  return Jumpers;
3143  } while (I != MBB.instr_begin());
3144 
3145  I = MBB.instr_end();
3146  --I;
3147 
3148  while (I->isDebugValue()) {
3149  if (I == MBB.instr_begin())
3150  return Jumpers;
3151  --I;
3152  }
3153  if (!isUnpredicatedTerminator(*I))
3154  return Jumpers;
3155 
3156  // Get the last instruction in the block.
3157  MachineInstr *LastInst = &*I;
3158  Jumpers.push_back(LastInst);
3159  MachineInstr *SecondLastInst = nullptr;
3160  // Find one more terminator if present.
3161  do {
3162  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3163  if (!SecondLastInst) {
3164  SecondLastInst = &*I;
3165  Jumpers.push_back(SecondLastInst);
3166  } else // This is a third branch.
3167  return Jumpers;
3168  }
3169  if (I == MBB.instr_begin())
3170  break;
3171  --I;
3172  } while (true);
3173  return Jumpers;
3174 }
3175 
3176 short HexagonInstrInfo::getBaseWithLongOffset(short Opcode) const {
3177  if (Opcode < 0)
3178  return -1;
3179  return Hexagon::getBaseWithLongOffset(Opcode);
3180 }
3181 
3183  return Hexagon::getBaseWithLongOffset(MI.getOpcode());
3184 }
3185 
3187  return Hexagon::getBaseWithRegOffset(MI.getOpcode());
3188 }
3189 
3190 // Returns the operand index of the constant-extended operand of the instruction.
3192  const uint64_t F = MI.getDesc().TSFlags;
3194 }
3195 
3196 // See if the instruction could potentially be a compound candidate.
3197 // If so, return its group. Zero otherwise.
3199  const MachineInstr &MI) const {
3200  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3201 
3202  switch (MI.getOpcode()) {
3203  default:
3204  return HexagonII::HCG_None;
3205  //
3206  // Compound pairs.
3207  // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3208  // "Rd16=#U6 ; jump #r9:2"
3209  // "Rd16=Rs16 ; jump #r9:2"
3210  //
3211  case Hexagon::C2_cmpeq:
3212  case Hexagon::C2_cmpgt:
3213  case Hexagon::C2_cmpgtu:
3214  DstReg = MI.getOperand(0).getReg();
3215  Src1Reg = MI.getOperand(1).getReg();
3216  Src2Reg = MI.getOperand(2).getReg();
3217  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3218  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3219  isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3220  return HexagonII::HCG_A;
3221  break;
3222  case Hexagon::C2_cmpeqi:
3223  case Hexagon::C2_cmpgti:
3224  case Hexagon::C2_cmpgtui:
3225  // P0 = cmp.eq(Rs,#u2)
3226  DstReg = MI.getOperand(0).getReg();
3227  SrcReg = MI.getOperand(1).getReg();
3228  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3229  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3230  isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3231  ((isUInt<5>(MI.getOperand(2).getImm())) ||
3232  (MI.getOperand(2).getImm() == -1)))
3233  return HexagonII::HCG_A;
3234  break;
3235  case Hexagon::A2_tfr:
3236  // Rd = Rs
3237  DstReg = MI.getOperand(0).getReg();
3238  SrcReg = MI.getOperand(1).getReg();
3239  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3240  return HexagonII::HCG_A;
3241  break;
3242  case Hexagon::A2_tfrsi:
3243  // Rd = #u6
3244  // Do not test for #u6 size since the const is getting extended
3245  // regardless, and a compound could still be formed.
3246  DstReg = MI.getOperand(0).getReg();
3247  if (isIntRegForSubInst(DstReg))
3248  return HexagonII::HCG_A;
3249  break;
3250  case Hexagon::S2_tstbit_i:
3251  DstReg = MI.getOperand(0).getReg();
3252  Src1Reg = MI.getOperand(1).getReg();
3253  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3254  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3255  MI.getOperand(2).isImm() &&
3256  isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3257  return HexagonII::HCG_A;
3258  break;
3259  // The fact that .new form is used pretty much guarantees
3260  // that predicate register will match. Nevertheless,
3261  // there could be some false positives without additional
3262  // checking.
3263  case Hexagon::J2_jumptnew:
3264  case Hexagon::J2_jumpfnew:
3265  case Hexagon::J2_jumptnewpt:
3266  case Hexagon::J2_jumpfnewpt:
3267  Src1Reg = MI.getOperand(0).getReg();
3268  if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3269  (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3270  return HexagonII::HCG_B;
3271  break;
3272  // Transfer and jump:
3273  // Rd=#U6 ; jump #r9:2
3274  // Rd=Rs ; jump #r9:2
3275  // Do not test for jump range here.
3276  case Hexagon::J2_jump:
3277  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3278  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3279  return HexagonII::HCG_C;
3280  break;
3281  }
3282 
3283  return HexagonII::HCG_None;
3284 }
3285 
3286 // Returns -1 when no compound opcode is found.
3288  const MachineInstr &GB) const {
3291  if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3292  (GB.getOpcode() != Hexagon::J2_jumptnew))
3293  return -1;
3294  unsigned DestReg = GA.getOperand(0).getReg();
3295  if (!GB.readsRegister(DestReg))
3296  return -1;
3297  if (DestReg == Hexagon::P0)
3298  return Hexagon::J4_cmpeqi_tp0_jump_nt;
3299  if (DestReg == Hexagon::P1)
3300  return Hexagon::J4_cmpeqi_tp1_jump_nt;
3301  return -1;
3302 }
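// Illustrative pairing for the code above: with GA a C2_cmpeqi that writes P0
// and GB a J2_jumptnew that reads it, the pair maps to
// J4_cmpeqi_tp0_jump_nt, i.e. "p0 = cmp.eq(Rs,#u); if (p0.new) jump:nt #r9:2".
// Had the compare written P1 instead, the result would be
// J4_cmpeqi_tp1_jump_nt.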
3303 
3304 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3305  enum Hexagon::PredSense inPredSense;
3306  inPredSense = invertPredicate ? Hexagon::PredSense_false :
3307  Hexagon::PredSense_true;
3308  int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3309  if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3310  return CondOpcode;
3311 
3312  llvm_unreachable("Unexpected predicable instruction");
3313 }
3314 
3315 // Return the .cur form of the instruction for a given vector load.
3317  switch (MI.getOpcode()) {
3318  default: llvm_unreachable("Unknown .cur type");
3319  case Hexagon::V6_vL32b_pi:
3320  return Hexagon::V6_vL32b_cur_pi;
3321  case Hexagon::V6_vL32b_ai:
3322  return Hexagon::V6_vL32b_cur_ai;
3323  //128B
3324  case Hexagon::V6_vL32b_pi_128B:
3325  return Hexagon::V6_vL32b_cur_pi_128B;
3326  case Hexagon::V6_vL32b_ai_128B:
3327  return Hexagon::V6_vL32b_cur_ai_128B;
3328  }
3329  return 0;
3330 }
3331 
3332 // The diagram below shows the steps involved in the conversion of a predicated
3333 // store instruction to its .new predicated new-value form.
3334 //
3335 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3336 // ^ ^
3337 // / \ (not OK. it will cause new-value store to be
3338 // / X conditional on p0.new while R2 producer is
3339 // / \ on p0)
3340 // / \.
3341 // p.new store p.old NV store
3342 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
3343 // ^ ^
3344 // \ /
3345 // \ /
3346 // \ /
3347 // p.old store
3348 // [if (p0)memw(R0+#0)=R2]
3349 //
3350 //
3351 // The following set of instructions further explains the scenario where
3352 // conditional new-value store becomes invalid when promoted to .new predicate
3353 // form.
3354 //
3355 // { 1) if (p0) r0 = add(r1, r2)
3356 // 2) p0 = cmp.eq(r3, #0) }
3357 //
3358 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3359 // the first two instructions because in instr 1, r0 is conditional on old value
3360 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3361 // is not valid for new-value stores.
3362 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3363 // from the "Conditional Store" list because a predicated new value store
3364 // would NOT be promoted to a double dot new store. See diagram below:
3365 // This function returns yes for those stores that are predicated but not
3366 // yet promoted to predicate dot new instructions.
3367 //
3368 // +---------------------+
3369 // /-----| if (p0) memw(..)=r0 |---------\~
3370 // || +---------------------+ ||
3371 // promote || /\ /\ || promote
3372 // || /||\ /||\ ||
3373 // \||/ demote || \||/
3374 // \/ || || \/
3375 // +-------------------------+ || +-------------------------+
3376 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3377 // +-------------------------+ || +-------------------------+
3378 // || || ||
3379 // || demote \||/
3380 // promote || \/ NOT possible
3381 // || || /\~
3382 // \||/ || /||\~
3383 // \/ || ||
3384 // +-----------------------------+
3385 // | if (p0.new) memw(..)=r0.new |
3386 // +-----------------------------+
3387 // Double Dot New Store
3388 //
3389 // Returns the most basic instruction for the .new predicated instructions and
3390 // new-value stores.
3391 // For example, all of the following instructions will be converted back to the
3392 // same instruction:
3393 // 1) if (p0.new) memw(R0+#0) = R1.new --->
3394 // 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3395 // 3) if (p0.new) memw(R0+#0) = R1 --->
3396 //
3397 // To understand the translation of instruction 1 to its original form, consider
3398 // a packet with 3 instructions.
3399 // { p0 = cmp.eq(R0,R1)
3400 // if (p0.new) R2 = add(R3, R4)
3401 // R5 = add (R3, R1)
3402 // }
3403 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3404 //
3405 // This instruction can be part of the previous packet only if both p0 and R2
3406 // are promoted to .new values. This promotion happens in steps, first
3407 // predicate register is promoted to .new and in the next iteration R2 is
3408 // promoted. Therefore, in case of dependence check failure (due to R5) during
3409 // next iteration, it should be converted back to its most basic form.
3410 
3411 // Return the new value instruction for a given store.
3413  int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3414  if (NVOpcode >= 0) // Valid new-value store instruction.
3415  return NVOpcode;
3416 
3417  switch (MI.getOpcode()) {
3418  default: llvm_unreachable("Unknown .new type");
3419  case Hexagon::S4_storerb_ur:
3420  return Hexagon::S4_storerbnew_ur;
3421 
3422  case Hexagon::S2_storerb_pci:
3423  return Hexagon::S2_storerb_pci;
3424 
3425  case Hexagon::S2_storeri_pci:
3426  return Hexagon::S2_storeri_pci;
3427 
3428  case Hexagon::S2_storerh_pci:
3429  return Hexagon::S2_storerh_pci;
3430 
3431  case Hexagon::S2_storerd_pci:
3432  return Hexagon::S2_storerd_pci;
3433 
3434  case Hexagon::S2_storerf_pci:
3435  return Hexagon::S2_storerf_pci;
3436 
3437  case Hexagon::V6_vS32b_ai:
3438  return Hexagon::V6_vS32b_new_ai;
3439 
3440  case Hexagon::V6_vS32b_pi:
3441  return Hexagon::V6_vS32b_new_pi;
3442 
3443  // 128B
3444  case Hexagon::V6_vS32b_ai_128B:
3445  return Hexagon::V6_vS32b_new_ai_128B;
3446 
3447  case Hexagon::V6_vS32b_pi_128B:
3448  return Hexagon::V6_vS32b_new_pi_128B;
3449  }
3450  return 0;
3451 }
3452 
3453 // Returns the opcode to use when converting MI, which is a conditional jump,
3454 // into a conditional instruction which uses the .new value of the predicate.
3455 // We also use branch probabilities to add a hint to the jump.
3457  const MachineBranchProbabilityInfo *MBPI) const {
3458  // We assume that block can have at most two successors.
3459  bool taken = false;
3460  const MachineBasicBlock *Src = MI.getParent();
3461  const MachineOperand &BrTarget = MI.getOperand(1);
3462  const MachineBasicBlock *Dst = BrTarget.getMBB();
3463 
3464  const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
3465  if (Prediction >= BranchProbability(1,2))
3466  taken = true;
3467 
3468  switch (MI.getOpcode()) {
3469  case Hexagon::J2_jumpt:
3470  return taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3471  case Hexagon::J2_jumpf:
3472  return taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3473 
3474  default:
3475  llvm_unreachable("Unexpected jump instruction.");
3476  }
3477 }
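// Illustrative note (not in the original source): with the 1/2 threshold
// above, a J2_jumpt on a hot edge (say probability 4/5) is converted to
// J2_jumptnewpt, printed as "if (p0.new) jump:t ...", while the same jump on
// a cold edge (say 1/10) becomes J2_jumptnew, printed as
// "if (p0.new) jump:nt ...".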
3478 
3479 // Return .new predicate version for an instruction.
3480 int HexagonInstrInfo::getDotNewPredOp(const MachineInstr &MI,
3481  const MachineBranchProbabilityInfo *MBPI) const {
3482  int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3483  if (NewOpcode >= 0) // Valid predicate new instruction
3484  return NewOpcode;
3485 
3486  switch (MI.getOpcode()) {
3487  // Conditional Jumps
3488  case Hexagon::J2_jumpt:
3489  case Hexagon::J2_jumpf:
3490  return getDotNewPredJumpOp(MI, MBPI);
3491 
3492  default:
3493  assert(0 && "Unknown .new type");
3494  }
3495  return 0;
3496 }
3497 
3498 int HexagonInstrInfo::getDotOldOp(const int opc) const {
3499  int NewOp = opc;
3500  if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3501  NewOp = Hexagon::getPredOldOpcode(NewOp);
3502  assert(NewOp >= 0 &&
3503  "Couldn't change predicate new instruction to its old form.");
3504  }
3505 
3506  if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3507  NewOp = Hexagon::getNonNVStore(NewOp);
3508  assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3509  }
3510  return NewOp;
3511 }
3512 
3513 // See if the instruction could potentially be a duplex candidate.
3514 // If so, return its group; HSIG_None (zero) otherwise.
3515 HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3516  const MachineInstr &MI) const {
3517  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3518  auto &HRI = getRegisterInfo();
3519 
3520  switch (MI.getOpcode()) {
3521  default:
3522  return HexagonII::HSIG_None;
3523  //
3524  // Group L1:
3525  //
3526  // Rd = memw(Rs+#u4:2)
3527  // Rd = memub(Rs+#u4:0)
3528  case Hexagon::L2_loadri_io:
3529  DstReg = MI.getOperand(0).getReg();
3530  SrcReg = MI.getOperand(1).getReg();
3531  // Special case this one from Group L2.
3532  // Rd = memw(r29+#u5:2)
3533  if (isIntRegForSubInst(DstReg)) {
3534  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3535  HRI.getStackRegister() == SrcReg &&
3536  MI.getOperand(2).isImm() &&
3537  isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3538  return HexagonII::HSIG_L2;
3539  // Rd = memw(Rs+#u4:2)
3540  if (isIntRegForSubInst(SrcReg) &&
3541  (MI.getOperand(2).isImm() &&
3542  isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3543  return HexagonII::HSIG_L1;
3544  }
3545  break;
3546  case Hexagon::L2_loadrub_io:
3547  // Rd = memub(Rs+#u4:0)
3548  DstReg = MI.getOperand(0).getReg();
3549  SrcReg = MI.getOperand(1).getReg();
3550  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3551  MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3552  return HexagonII::HSIG_L1;
3553  break;
3554  //
3555  // Group L2:
3556  //
3557  // Rd = memh/memuh(Rs+#u3:1)
3558  // Rd = memb(Rs+#u3:0)
3559  // Rd = memw(r29+#u5:2) - Handled above.
3560  // Rdd = memd(r29+#u5:3)
3561  // deallocframe
3562  // [if ([!]p0[.new])] dealloc_return
3563  // [if ([!]p0[.new])] jumpr r31
3564  case Hexagon::L2_loadrh_io:
3565  case Hexagon::L2_loadruh_io:
3566  // Rd = memh/memuh(Rs+#u3:1)
3567  DstReg = MI.getOperand(0).getReg();
3568  SrcReg = MI.getOperand(1).getReg();
3569  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3570  MI.getOperand(2).isImm() &&
3571  isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3572  return HexagonII::HSIG_L2;
3573  break;
3574  case Hexagon::L2_loadrb_io:
3575  // Rd = memb(Rs+#u3:0)
3576  DstReg = MI.getOperand(0).getReg();
3577  SrcReg = MI.getOperand(1).getReg();
3578  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3579  MI.getOperand(2).isImm() &&
3580  isUInt<3>(MI.getOperand(2).getImm()))
3581  return HexagonII::HSIG_L2;
3582  break;
3583  case Hexagon::L2_loadrd_io:
3584  // Rdd = memd(r29+#u5:3)
3585  DstReg = MI.getOperand(0).getReg();
3586  SrcReg = MI.getOperand(1).getReg();
3587  if (isDblRegForSubInst(DstReg, HRI) &&
3588  Hexagon::IntRegsRegClass.contains(SrcReg) &&
3589  HRI.getStackRegister() == SrcReg &&
3590  MI.getOperand(2).isImm() &&
3591  isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3592  return HexagonII::HSIG_L2;
3593  break;
3594  // dealloc_return is not documented in the Hexagon Manual, but it is marked
3595  // with the A_SUBINSN attribute in iset_v4classic.py.
3596  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3597  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3598  case Hexagon::L4_return:
3599  case Hexagon::L2_deallocframe:
3600  return HexagonII::HSIG_L2;
3601  case Hexagon::EH_RETURN_JMPR:
3602  case Hexagon::PS_jmpret:
3603  // jumpr r31
3604  // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
3605  DstReg = MI.getOperand(0).getReg();
3606  if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3607  return HexagonII::HSIG_L2;
3608  break;
3609  case Hexagon::PS_jmprett:
3610  case Hexagon::PS_jmpretf:
3611  case Hexagon::PS_jmprettnewpt:
3612  case Hexagon::PS_jmpretfnewpt:
3613  case Hexagon::PS_jmprettnew:
3614  case Hexagon::PS_jmpretfnew:
3615  DstReg = MI.getOperand(1).getReg();
3616  SrcReg = MI.getOperand(0).getReg();
3617  // [if ([!]p0[.new])] jumpr r31
3618  if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
3619  (Hexagon::P0 == SrcReg)) &&
3620  (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
3621  return HexagonII::HSIG_L2;
3622  break;
3623  case Hexagon::L4_return_t :
3624  case Hexagon::L4_return_f :
3625  case Hexagon::L4_return_tnew_pnt :
3626  case Hexagon::L4_return_fnew_pnt :
3627  case Hexagon::L4_return_tnew_pt :
3628  case Hexagon::L4_return_fnew_pt :
3629  // [if ([!]p0[.new])] dealloc_return
3630  SrcReg = MI.getOperand(0).getReg();
3631  if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
3632  return HexagonII::HSIG_L2;
3633  break;
3634  //
3635  // Group S1:
3636  //
3637  // memw(Rs+#u4:2) = Rt
3638  // memb(Rs+#u4:0) = Rt
3639  case Hexagon::S2_storeri_io:
3640  // Special case this one from Group S2.
3641  // memw(r29+#u5:2) = Rt
3642  Src1Reg = MI.getOperand(0).getReg();
3643  Src2Reg = MI.getOperand(2).getReg();
3644  if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3645  isIntRegForSubInst(Src2Reg) &&
3646  HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
3647  isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
3648  return HexagonII::HSIG_S2;
3649  // memw(Rs+#u4:2) = Rt
3650  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3651  MI.getOperand(1).isImm() &&
3652  isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
3653  return HexagonII::HSIG_S1;
3654  break;
3655  case Hexagon::S2_storerb_io:
3656  // memb(Rs+#u4:0) = Rt
3657  Src1Reg = MI.getOperand(0).getReg();
3658  Src2Reg = MI.getOperand(2).getReg();
3659  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3660  MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
3661  return HexagonII::HSIG_S1;
3662  break;
3663  //
3664  // Group S2:
3665  //
3666  // memh(Rs+#u3:1) = Rt
3667  // memw(r29+#u5:2) = Rt
3668  // memd(r29+#s6:3) = Rtt
3669  // memw(Rs+#u4:2) = #U1
3670  // memb(Rs+#u4) = #U1
3671  // allocframe(#u5:3)
3672  case Hexagon::S2_storerh_io:
3673  // memh(Rs+#u3:1) = Rt
3674  Src1Reg = MI.getOperand(0).getReg();
3675  Src2Reg = MI.getOperand(2).getReg();
3676  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3677  MI.getOperand(1).isImm() &&
3678  isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
3679  return HexagonII::HSIG_S1;
3680  break;
3681  case Hexagon::S2_storerd_io:
3682  // memd(r29+#s6:3) = Rtt
3683  Src1Reg = MI.getOperand(0).getReg();
3684  Src2Reg = MI.getOperand(2).getReg();
3685  if (isDblRegForSubInst(Src2Reg, HRI) &&
3686  Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3687  HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
3688  isShiftedInt<6,3>(MI.getOperand(1).getImm()))
3689  return HexagonII::HSIG_S2;
3690  break;
3691  case Hexagon::S4_storeiri_io:
3692  // memw(Rs+#u4:2) = #U1
3693  Src1Reg = MI.getOperand(0).getReg();
3694  if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
3695  isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
3696  MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
3697  return HexagonII::HSIG_S2;
3698  break;
3699  case Hexagon::S4_storeirb_io:
3700  // memb(Rs+#u4) = #U1
3701  Src1Reg = MI.getOperand(0).getReg();
3702  if (isIntRegForSubInst(Src1Reg) &&
3703  MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
3704  MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
3705  return HexagonII::HSIG_S2;
3706  break;
3707  case Hexagon::S2_allocframe:
3708  if (MI.getOperand(0).isImm() &&
3709  isShiftedUInt<5,3>(MI.getOperand(0).getImm()))
3710  return HexagonII::HSIG_S1;
3711  break;
3712  //
3713  // Group A:
3714  //
3715  // Rx = add(Rx,#s7)
3716  // Rd = Rs
3717  // Rd = #u6
3718  // Rd = #-1
3719  // if ([!]P0[.new]) Rd = #0
3720  // Rd = add(r29,#u6:2)
3721  // Rx = add(Rx,Rs)
3722  // P0 = cmp.eq(Rs,#u2)
3723  // Rdd = combine(#0,Rs)
3724  // Rdd = combine(Rs,#0)
3725  // Rdd = combine(#u2,#U2)
3726  // Rd = add(Rs,#1)
3727  // Rd = add(Rs,#-1)
3728  // Rd = sxth/sxtb/zxtb/zxth(Rs)
3729  // Rd = and(Rs,#1)
3730  case Hexagon::A2_addi:
3731  DstReg = MI.getOperand(0).getReg();
3732  SrcReg = MI.getOperand(1).getReg();
3733  if (isIntRegForSubInst(DstReg)) {
3734  // Rd = add(r29,#u6:2)
3735  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3736  HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
3737  isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
3738  return HexagonII::HSIG_A;
3739  // Rx = add(Rx,#s7)
3740  if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
3741  isInt<7>(MI.getOperand(2).getImm()))
3742  return HexagonII::HSIG_A;
3743  // Rd = add(Rs,#1)
3744  // Rd = add(Rs,#-1)
3745  if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3746  ((MI.getOperand(2).getImm() == 1) ||
3747  (MI.getOperand(2).getImm() == -1)))
3748  return HexagonII::HSIG_A;
3749  }
3750  break;
3751  case Hexagon::A2_add:
3752  // Rx = add(Rx,Rs)
3753  DstReg = MI.getOperand(0).getReg();
3754  Src1Reg = MI.getOperand(1).getReg();
3755  Src2Reg = MI.getOperand(2).getReg();
3756  if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
3757  isIntRegForSubInst(Src2Reg))
3758  return HexagonII::HSIG_A;
3759  break;
3760  case Hexagon::A2_andir:
3761  // Same as zxtb.
3762  // Rd16=and(Rs16,#255)
3763  // Rd16=and(Rs16,#1)
3764  DstReg = MI.getOperand(0).getReg();
3765  SrcReg = MI.getOperand(1).getReg();
3766  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3767  MI.getOperand(2).isImm() &&
3768  ((MI.getOperand(2).getImm() == 1) ||
3769  (MI.getOperand(2).getImm() == 255)))
3770  return HexagonII::HSIG_A;
3771  break;
3772  case Hexagon::A2_tfr:
3773  // Rd = Rs
3774  DstReg = MI.getOperand(0).getReg();
3775  SrcReg = MI.getOperand(1).getReg();
3776  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3777  return HexagonII::HSIG_A;
3778  break;
3779  case Hexagon::A2_tfrsi:
3780  // Rd = #u6
3781  // Do not test for the #u6 size since the constant gets extended
3782  // regardless, and a compound could be formed.
3783  // Rd = #-1
3784  DstReg = MI.getOperand(0).getReg();
3785  if (isIntRegForSubInst(DstReg))
3786  return HexagonII::HSIG_A;
3787  break;
3788  case Hexagon::C2_cmoveit:
3789  case Hexagon::C2_cmovenewit:
3790  case Hexagon::C2_cmoveif:
3791  case Hexagon::C2_cmovenewif:
3792  // if ([!]P0[.new]) Rd = #0
3793  // Actual form:
3794  // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
3795  DstReg = MI.getOperand(0).getReg();
3796  SrcReg = MI.getOperand(1).getReg();
3797  if (isIntRegForSubInst(DstReg) &&
3798  Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
3799  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
3800  return HexagonII::HSIG_A;
3801  break;
3802  case Hexagon::C2_cmpeqi:
3803  // P0 = cmp.eq(Rs,#u2)
3804  DstReg = MI.getOperand(0).getReg();
3805  SrcReg = MI.getOperand(1).getReg();
3806  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3807  Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
3808  MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
3809  return HexagonII::HSIG_A;
3810  break;
3811  case Hexagon::A2_combineii:
3812  case Hexagon::A4_combineii:
3813  // Rdd = combine(#u2,#U2)
3814  DstReg = MI.getOperand(0).getReg();
3815  if (isDblRegForSubInst(DstReg, HRI) &&
3816  ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
3817  (MI.getOperand(1).isGlobal() &&
3818  isUInt<2>(MI.getOperand(1).getOffset()))) &&
3819  ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
3820  (MI.getOperand(2).isGlobal() &&
3821  isUInt<2>(MI.getOperand(2).getOffset()))))
3822  return HexagonII::HSIG_A;
3823  break;
3824  case Hexagon::A4_combineri:
3825  // Rdd = combine(Rs,#0)
3826  DstReg = MI.getOperand(0).getReg();
3827  SrcReg = MI.getOperand(1).getReg();
3828  if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3829  ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
3830  (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
3831  return HexagonII::HSIG_A;
3832  break;
3833  case Hexagon::A4_combineir:
3834  // Rdd = combine(#0,Rs)
3835  DstReg = MI.getOperand(0).getReg();
3836  SrcReg = MI.getOperand(2).getReg();
3837  if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3838  ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
3839  (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
3840  return HexagonII::HSIG_A;
3841  break;
3842  case Hexagon::A2_sxtb:
3843  case Hexagon::A2_sxth:
3844  case Hexagon::A2_zxtb:
3845  case Hexagon::A2_zxth:
3846  // Rd = sxth/sxtb/zxtb/zxth(Rs)
3847  DstReg = MI.getOperand(0).getReg();
3848  SrcReg = MI.getOperand(1).getReg();
3849  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3850  return HexagonII::HSIG_A;
3851  break;
3852  }
3853 
3854  return HexagonII::HSIG_None;
3855 }
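// Illustrative sketch (added annotation, not part of the original file): a
// duplex needs two sub-instruction candidates, so a first-cut filter is that
// neither instruction maps to HSIG_None; the full symmetric compatibility
// check is isDuplexPair(). The helper name is hypothetical.
static bool exampleBothDuplexCandidates(const HexagonInstrInfo &HII,
                                        const MachineInstr &MIa,
                                        const MachineInstr &MIb) {
  return HII.getDuplexCandidateGroup(MIa) != HexagonII::HSIG_None &&
         HII.getDuplexCandidateGroup(MIb) != HexagonII::HSIG_None;
}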
3856 
3857 short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr &MI) const {
3858  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
3859 }
3860 
3861 // Return first non-debug instruction in the basic block.
3862 MachineInstr *HexagonInstrInfo::getFirstNonDbgInst(MachineBasicBlock *BB)
3863  const {
3864  for (auto MII = BB->instr_begin(), End = BB->instr_end(); MII != End; MII++) {
3865  MachineInstr &MI = *MII;
3866  if (MI.isDebugValue())
3867  continue;
3868  return &MI;
3869  }
3870  return nullptr;
3871 }
3872 
3873 unsigned HexagonInstrInfo::getInstrTimingClassLatency(
3874  const InstrItineraryData *ItinData, const MachineInstr &MI) const {
3875  // Default to one cycle for no itinerary. However, an "empty" itinerary may
3876  // still have a MinLatency property, which getStageLatency checks.
3877  if (!ItinData)
3878  return getInstrLatency(ItinData, MI);
3879 
3880  // Get the latency embedded in the itinerary. If we're not using timing class
3881 // latencies, or if we are using BSB scheduling, then restrict the maximum latency
3882  // to 1 (that is, either 0 or 1).
3883  if (MI.isTransient())
3884  return 0;
3885  unsigned Latency = ItinData->getStageLatency(MI.getDesc().getSchedClass());
3886  if (!EnableTimingClassLatency ||
3888  useBSBScheduling())
3889  if (Latency > 1)
3890  Latency = 1;
3891  return Latency;
3892 }
3893 
3894 // Inverts the predication logic.
3895 // p -> NotP
3896 // NotP -> P
3897 bool HexagonInstrInfo::getInvertedPredSense(
3898  SmallVectorImpl<MachineOperand> &Cond) const {
3899  if (Cond.empty())
3900  return false;
3901  unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
3902  Cond[0].setImm(Opc);
3903  return true;
3904 }
3905 
3906 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
3907  int InvPredOpcode;
3908  InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
3909  : Hexagon::getTruePredOpcode(Opc);
3910  if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
3911  return InvPredOpcode;
3912 
3913  llvm_unreachable("Unexpected predicated instruction");
3914 }
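// Illustrative note (not in the original source): the inversion only flips the
// predicate sense encoded in the opcode; the predicate register operand itself
// is untouched. For example:
//   getInvertedPredicatedOpcode(Hexagon::J2_jumpt)   == Hexagon::J2_jumpf
//   getInvertedPredicatedOpcode(Hexagon::C2_cmoveit) == Hexagon::C2_cmoveif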
3915 
3916 // Returns the max value that doesn't need to be extended.
3917 int HexagonInstrInfo::getMaxValue(const MachineInstr &MI) const {
3918  const uint64_t F = MI.getDesc().TSFlags;
3919  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3920                       & HexagonII::ExtentSignedMask;
3921  unsigned bits = (F >> HexagonII::ExtentBitsPos)
3922                    & HexagonII::ExtentBitsMask;
3923 
3924  if (isSigned) // if value is signed
3925  return ~(-1U << (bits - 1));
3926  else
3927  return ~(-1U << bits);
3928 }
3929 
3930 unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr &MI) const {
3931  const uint64_t F = MI.getDesc().TSFlags;
3932  return (F >> HexagonII::MemAccessSizePos) & HexagonII::MemAccesSizeMask;
3933 }
3934 
3935 // Returns the min value that doesn't need to be extended.
3936 int HexagonInstrInfo::getMinValue(const MachineInstr &MI) const {
3937  const uint64_t F = MI.getDesc().TSFlags;
3938  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3939                       & HexagonII::ExtentSignedMask;
3940  unsigned bits = (F >> HexagonII::ExtentBitsPos)
3941                    & HexagonII::ExtentBitsMask;
3942 
3943  if (isSigned) // if value is signed
3944  return -1U << (bits - 1);
3945  else
3946  return 0;
3947 }
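// Worked example (added annotation, not part of the original file): for a
// signed 8-bit extent (#s8) the in-range values are [-128, 127]; for an
// unsigned 6-bit extent (#u6) they are [0, 63]. Anything outside these ranges
// needs a constant extender.
static_assert(~(-1U << (8 - 1)) == 127U, "max of a signed 8-bit extent");
static_assert(~(-1U << 6) == 63U, "max of an unsigned 6-bit extent");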
3948 
3949 // Returns the opcode of the non-extended equivalent instruction.
3950 short HexagonInstrInfo::getNonExtOpcode(const MachineInstr &MI) const {
3951  // Check if the instruction has a register form that uses register in place
3952  // of the extended operand, if so return that as the non-extended form.
3953  short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
3954  if (NonExtOpcode >= 0)
3955  return NonExtOpcode;
3956 
3957  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
3958  // Check addressing mode and retrieve non-ext equivalent instruction.
3959  switch (getAddrMode(MI)) {
3960  case HexagonII::Absolute :
3961  return Hexagon::getBaseWithImmOffset(MI.getOpcode());
3962     case HexagonII::BaseImmOffset :
3963  return Hexagon::getBaseWithRegOffset(MI.getOpcode());
3964     case HexagonII::BaseLongOffset :
3965  return Hexagon::getRegShlForm(MI.getOpcode());
3966 
3967  default:
3968  return -1;
3969  }
3970  }
3971  return -1;
3972 }
3973 
3974 bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
3975  unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
3976  if (Cond.empty())
3977  return false;
3978  assert(Cond.size() == 2);
3979  if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
3980  DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
3981  return false;
3982  }
3983  PredReg = Cond[1].getReg();
3984  PredRegPos = 1;
3985  // See IfConversion.cpp why we add RegState::Implicit | RegState::Undef
3986  PredRegFlags = 0;
3987  if (Cond[1].isImplicit())
3988  PredRegFlags = RegState::Implicit;
3989  if (Cond[1].isUndef())
3990  PredRegFlags |= RegState::Undef;
3991  return true;
3992 }
3993 
3994 short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr &MI) const {
3995  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
3996 }
3997 
3998 short HexagonInstrInfo::getRegForm(const MachineInstr &MI) const {
3999  return Hexagon::getRegForm(MI.getOpcode());
4000 }
4001 
4002 // Return the number of bytes required to encode the instruction.
4003 // Hexagon instructions are fixed length, 4 bytes, unless they
4004 // use a constant extender, which requires another 4 bytes.
4005 // For debug instructions and prolog labels, return 0.
4006 unsigned HexagonInstrInfo::getSize(const MachineInstr &MI) const {
4007  if (MI.isDebugValue() || MI.isPosition())
4008  return 0;
4009 
4010  unsigned Size = MI.getDesc().getSize();
4011  if (!Size)
4012  // Assume the default insn size in case it cannot be determined
4013  // for whatever reason.
4014  Size = HEXAGON_INSTR_SIZE;
4015 
4016  if (isConstExtended(MI) || isExtended(MI))
4017  Size += HEXAGON_INSTR_SIZE;
4018 
4019  // Try to compute the number of instructions in the inline asm.
4020   if (MI.isInlineAsm()) {
4021  const MachineBasicBlock &MBB = *MI.getParent();
4022  const MachineFunction *MF = MBB.getParent();
4023  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4024 
4025  // Count the number of register definitions to find the asm string.
4026  unsigned NumDefs = 0;
4027  for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
4028  ++NumDefs)
4029  assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");
4030 
4031  assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
4032  // Disassemble the AsmStr and approximate number of instructions.
4033  const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
4034  Size = getInlineAsmLength(AsmStr, *MAI);
4035  }
4036 
4037  return Size;
4038 }
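// Illustrative sketch (added annotation, not part of the original file): how a
// pass such as branch relaxation can estimate a block's byte size with
// getSize(); a plain instruction contributes HEXAGON_INSTR_SIZE (4) bytes and
// a constant-extended one contributes 8. The helper name is hypothetical.
static unsigned exampleBlockSizeInBytes(const HexagonInstrInfo &HII,
                                        const MachineBasicBlock &MBB) {
  unsigned Bytes = 0;
  for (const MachineInstr &MI : MBB)
    Bytes += HII.getSize(MI);
  return Bytes;
}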
4039 
4040 uint64_t HexagonInstrInfo::getType(const MachineInstr &MI) const {
4041  const uint64_t F = MI.getDesc().TSFlags;
4042  return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4043 }
4044 
4045 unsigned HexagonInstrInfo::getUnits(const MachineInstr &MI) const {
4046  const TargetSubtargetInfo &ST = MI.getParent()->getParent()->getSubtarget();
4047  const InstrItineraryData &II = *ST.getInstrItineraryData();
4048  const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
4049 
4050  return IS.getUnits();
4051 }
4052 
4053 unsigned HexagonInstrInfo::getValidSubTargets(const unsigned Opcode) const {
4054  const uint64_t F = get(Opcode).TSFlags;
4055  return (F >> HexagonII::validSubTargetPos) & HexagonII::validSubTargetMask;
4056 }
4057 
4058 // Calculate size of the basic block without debug instructions.
4059 unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
4060  return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4061 }
4062 
4063 unsigned HexagonInstrInfo::nonDbgBundleSize(
4064  MachineBasicBlock::const_iterator BundleHead) const {
4065  assert(BundleHead->isBundle() && "Not a bundle header");
4066  auto MII = BundleHead.getInstrIterator();
4067  // Skip the bundle header.
4068  return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
4069 }
4070 
4071 /// immediateExtend - Changes the instruction in place to one using an immediate
4072 /// extender.
4073 void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
4074  assert((isExtendable(MI)||isConstExtended(MI)) &&
4075  "Instruction must be extendable");
4076  // Find which operand is extendable.
4077  short ExtOpNum = getCExtOpNum(MI);
4078  MachineOperand &MO = MI.getOperand(ExtOpNum);
4079  // This needs to be something we understand.
4080  assert((MO.isMBB() || MO.isImm()) &&
4081  "Branch with unknown extendable field type");
4082  // Mark given operand as extended.
4083   MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
4084 }
4085 
4086 bool HexagonInstrInfo::invertAndChangeJumpTarget(
4087  MachineInstr &MI, MachineBasicBlock *NewTarget) const {
4088  DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#"
4089  << NewTarget->getNumber(); MI.dump(););
4090  assert(MI.isBranch());
4091  unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
4092  int TargetPos = MI.getNumOperands() - 1;
4093  // In general the branch target is the last operand, but implicit defs
4094  // added at the end might change its position.
4095  while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
4096  --TargetPos;
4097  assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
4098  MI.getOperand(TargetPos).setMBB(NewTarget);
4099   if (EnableBranchPrediction && isPredicatedNew(MI)) {
4100  NewOpcode = reversePrediction(NewOpcode);
4101  }
4102  MI.setDesc(get(NewOpcode));
4103  return true;
4104 }
4105 
4106 void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
4107  /* +++ The code below is used to generate the complete set of Hexagon instructions +++ */
4108   MachineFunction::iterator A = MF.begin();
4109  MachineBasicBlock &B = *A;
4110   MachineBasicBlock::iterator I = B.begin();
4111  DebugLoc DL = I->getDebugLoc();
4112  MachineInstr *NewMI;
4113 
4114  for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4115  insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4116  NewMI = BuildMI(B, I, DL, get(insn));
4117  DEBUG(dbgs() << "\n" << getName(NewMI->getOpcode()) <<
4118  " Class: " << NewMI->getDesc().getSchedClass());
4119  NewMI->eraseFromParent();
4120  }
4121  /* --- The code above is used to generate the complete set of Hexagon instructions --- */
4122 }
4123 
4124 // Inverts the predication logic.
4125 // p -> NotP
4126 // NotP -> P
4127 bool HexagonInstrInfo::reversePredSense(MachineInstr &MI) const {
4128  DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
4129   MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
4130  return true;
4131 }
4132 
4133 // Reverse the branch prediction.
4134 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4135  int PredRevOpcode = -1;
4136  if (isPredictedTaken(Opcode))
4137  PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4138  else
4139  PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4140  assert(PredRevOpcode > 0);
4141  return PredRevOpcode;
4142 }
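// Illustrative note (not in the original source): reversing the prediction
// flips only the taken/not-taken hint and keeps the predicate sense, e.g.
//   reversePrediction(Hexagon::J2_jumptnewpt) == Hexagon::J2_jumptnew   // :t  -> :nt
//   reversePrediction(Hexagon::J2_jumpfnew)   == Hexagon::J2_jumpfnewpt // :nt -> :t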
4143 
4144 // TODO: Add more rigorous validation.
4145 bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
4146  const {
4147  return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4148 }
4149 
4150 short HexagonInstrInfo::xformRegToImmOffset(const MachineInstr &MI) const {
4151  return Hexagon::xformRegToImmOffset(MI.getOpcode());
4152 }