Bug Summary

File: build/source/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
Warning: line 1088, column 43
Called C++ object pointer is null
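
The defect path below ends at source line 1088 in the PS_call_instrprof_custom case of HexagonInstrInfo::expandPostRAPseudo: 'GV' comes from dyn_cast<GlobalVariable>(NameVar), which yields a null pointer whenever the first operand is not a GlobalVariable, and GV->getInitializer() is then dereferenced without a check. A minimal sketch of a guard is shown here for orientation only; it is not the upstream code, and bailing out of expandPostRAPseudo by returning false on a malformed operand is an assumption.

    // Hypothetical hardening of the flagged statement (illustrative sketch only):
    const auto *GV = dyn_cast<GlobalVariable>(NameVar);
    if (!GV || !GV->hasInitializer())
      return false;   // assumed bail-out: leave the pseudo unexpanded
    const auto *Arr = dyn_cast<ConstantDataArray>(GV->getInitializer());
    if (!Arr)
      return false;   // initializer is not a constant data array
    StringRef NameStr =
        Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();

The annotated source and the analyzer's step-by-step path follow.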

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name HexagonInstrInfo.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-17/lib/clang/17 -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/Hexagon -I /build/source/llvm/lib/Target/Hexagon -I include -I /build/source/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1683717183 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
1//===- HexagonInstrInfo.cpp - Hexagon Instruction Information -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the Hexagon implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "HexagonInstrInfo.h"
14#include "Hexagon.h"
15#include "HexagonFrameLowering.h"
16#include "HexagonHazardRecognizer.h"
17#include "HexagonRegisterInfo.h"
18#include "HexagonSubtarget.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/CodeGen/DFAPacketizer.h"
24#include "llvm/CodeGen/LivePhysRegs.h"
25#include "llvm/CodeGen/MachineBasicBlock.h"
26#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineFunction.h"
29#include "llvm/CodeGen/MachineInstr.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachineInstrBundle.h"
32#include "llvm/CodeGen/MachineLoopInfo.h"
33#include "llvm/CodeGen/MachineMemOperand.h"
34#include "llvm/CodeGen/MachineOperand.h"
35#include "llvm/CodeGen/MachineRegisterInfo.h"
36#include "llvm/CodeGen/MachineValueType.h"
37#include "llvm/CodeGen/ScheduleDAG.h"
38#include "llvm/CodeGen/TargetInstrInfo.h"
39#include "llvm/CodeGen/TargetOpcodes.h"
40#include "llvm/CodeGen/TargetRegisterInfo.h"
41#include "llvm/CodeGen/TargetSubtargetInfo.h"
42#include "llvm/IR/DebugLoc.h"
43#include "llvm/MC/MCAsmInfo.h"
44#include "llvm/MC/MCInstBuilder.h"
45#include "llvm/MC/MCInstrDesc.h"
46#include "llvm/MC/MCInstrItineraries.h"
47#include "llvm/MC/MCRegisterInfo.h"
48#include "llvm/Support/BranchProbability.h"
49#include "llvm/Support/CommandLine.h"
50#include "llvm/Support/Debug.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Target/TargetMachine.h"
55#include <cassert>
56#include <cctype>
57#include <cstdint>
58#include <cstring>
59#include <iterator>
60#include <optional>
61#include <string>
62#include <utility>
63
64using namespace llvm;
65
66#define DEBUG_TYPE "hexagon-instrinfo"
67
68#define GET_INSTRINFO_CTOR_DTOR
69#define GET_INSTRMAP_INFO
70#include "HexagonDepTimingClasses.h"
71#include "HexagonGenDFAPacketizer.inc"
72#include "HexagonGenInstrInfo.inc"
73
74cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
75 cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
76 "packetization boundary."));
77
78static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
79 cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
80
81static cl::opt<bool> DisableNVSchedule(
82 "disable-hexagon-nv-schedule", cl::Hidden,
83 cl::desc("Disable schedule adjustment for new value stores."));
84
85static cl::opt<bool> EnableTimingClassLatency(
86 "enable-timing-class-latency", cl::Hidden, cl::init(false),
87 cl::desc("Enable timing class latency"));
88
89static cl::opt<bool> EnableALUForwarding(
90 "enable-alu-forwarding", cl::Hidden, cl::init(true),
91 cl::desc("Enable vec alu forwarding"));
92
93static cl::opt<bool> EnableACCForwarding(
94 "enable-acc-forwarding", cl::Hidden, cl::init(true),
95 cl::desc("Enable vec acc forwarding"));
96
97static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
98 cl::init(true), cl::Hidden,
99 cl::desc("branch relax asm"));
100
101static cl::opt<bool>
102 UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden,
103 cl::desc("Use the DFA based hazard recognizer."));
104
105/// Constants for Hexagon instructions.
106const int Hexagon_MEMW_OFFSET_MAX = 4095;
107const int Hexagon_MEMW_OFFSET_MIN = -4096;
108const int Hexagon_MEMD_OFFSET_MAX = 8191;
109const int Hexagon_MEMD_OFFSET_MIN = -8192;
110const int Hexagon_MEMH_OFFSET_MAX = 2047;
111const int Hexagon_MEMH_OFFSET_MIN = -2048;
112const int Hexagon_MEMB_OFFSET_MAX = 1023;
113const int Hexagon_MEMB_OFFSET_MIN = -1024;
114const int Hexagon_ADDI_OFFSET_MAX = 32767;
115const int Hexagon_ADDI_OFFSET_MIN = -32768;
116
117// Pin the vtable to this file.
118void HexagonInstrInfo::anchor() {}
119
120HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
121 : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
122 Subtarget(ST) {}
123
124namespace llvm {
125namespace HexagonFUnits {
126 bool isSlot0Only(unsigned units);
127}
128}
129
130static bool isIntRegForSubInst(Register Reg) {
131 return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
132 (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
133}
134
135static bool isDblRegForSubInst(Register Reg, const HexagonRegisterInfo &HRI) {
136 return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
137 isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
138}
139
140/// Calculate number of instructions excluding the debug instructions.
141static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
142 MachineBasicBlock::const_instr_iterator MIE) {
143 unsigned Count = 0;
144 for (; MIB != MIE; ++MIB) {
145 if (!MIB->isDebugInstr())
146 ++Count;
147 }
148 return Count;
149}
150
151// Check if the A2_tfrsi instruction is cheap or not. If the operand has
152// to be constant-extended, it is not cheap since it occupies two slots
153// in a packet.
154bool HexagonInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
155 // Enable the following steps only at Os/Oz
156 if (!(MI.getMF()->getFunction().hasOptSize()))
157 return MI.isAsCheapAsAMove();
158
159 if (MI.getOpcode() == Hexagon::A2_tfrsi) {
160 auto Op = MI.getOperand(1);
161 // If the instruction has a global address as operand, it is not cheap
162 // since the operand will be constant extended.
163 if (Op.isGlobal())
164 return false;
165 // If the instruction has an operand of size > 16 bits, it will be
166 // const-extended and hence it is not cheap.
167 if (Op.isImm()) {
168 int64_t Imm = Op.getImm();
169 if (!isInt<16>(Imm))
170 return false;
171 }
172 }
173 return MI.isAsCheapAsAMove();
174}
175
176// Do not sink floating point instructions that update the USR register.
177// Example:
178// feclearexcept
179// F2_conv_w2sf
180// fetestexcept
181// MachineSink sinks F2_conv_w2sf and we are not able to catch exceptions.
182// TODO: On some of these floating point instructions, USR is marked as Use.
183// In reality, these instructions also Def the USR. If USR is marked as Def,
184// some of the assumptions in assembler packetization are broken.
185bool HexagonInstrInfo::shouldSink(const MachineInstr &MI) const {
186 // Assumption: A floating point instruction that reads the USR will write
187 // the USR as well.
188 if (isFloat(MI) && MI.hasRegisterImplicitUseOperand(Hexagon::USR))
189 return false;
190 return true;
191}
192
193/// Find the hardware loop instruction used to set-up the specified loop.
194/// On Hexagon, we have two instructions used to set-up the hardware loop
195/// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
196/// to indicate the end of a loop.
197MachineInstr *HexagonInstrInfo::findLoopInstr(MachineBasicBlock *BB,
198 unsigned EndLoopOp, MachineBasicBlock *TargetBB,
199 SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
200 unsigned LOOPi;
201 unsigned LOOPr;
202 if (EndLoopOp == Hexagon::ENDLOOP0) {
203 LOOPi = Hexagon::J2_loop0i;
204 LOOPr = Hexagon::J2_loop0r;
205 } else { // EndLoopOp == Hexagon::ENDLOOP1
206 LOOPi = Hexagon::J2_loop1i;
207 LOOPr = Hexagon::J2_loop1r;
208 }
209
210 // The loop set-up instruction will be in a predecessor block
211 for (MachineBasicBlock *PB : BB->predecessors()) {
212 // If this has already been visited, skip it.
213 if (!Visited.insert(PB).second)
214 continue;
215 if (PB == BB)
216 continue;
217 for (MachineInstr &I : llvm::reverse(PB->instrs())) {
218 unsigned Opc = I.getOpcode();
219 if (Opc == LOOPi || Opc == LOOPr)
220 return &I;
221 // We've reached a different loop, which means the loop0/1 has been
222 // removed.
223 if (Opc == EndLoopOp && I.getOperand(0).getMBB() != TargetBB)
224 return nullptr;
225 }
226 // Check the predecessors for the LOOP instruction.
227 if (MachineInstr *Loop = findLoopInstr(PB, EndLoopOp, TargetBB, Visited))
228 return Loop;
229 }
230 return nullptr;
231}
232
233/// Gather register def/uses from MI.
234/// This treats possible (predicated) defs as actually happening ones
235/// (conservatively).
236static inline void parseOperands(const MachineInstr &MI,
237 SmallVectorImpl<Register> &Defs, SmallVectorImpl<Register> &Uses) {
238 Defs.clear();
239 Uses.clear();
240
241 for (const MachineOperand &MO : MI.operands()) {
242 if (!MO.isReg())
243 continue;
244
245 Register Reg = MO.getReg();
246 if (!Reg)
247 continue;
248
249 if (MO.isUse())
250 Uses.push_back(MO.getReg());
251
252 if (MO.isDef())
253 Defs.push_back(MO.getReg());
254 }
255}
256
257// Position dependent, so check twice for swap.
258static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
259 switch (Ga) {
260 case HexagonII::HSIG_None:
261 default:
262 return false;
263 case HexagonII::HSIG_L1:
264 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
265 case HexagonII::HSIG_L2:
266 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
267 Gb == HexagonII::HSIG_A);
268 case HexagonII::HSIG_S1:
269 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
270 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
271 case HexagonII::HSIG_S2:
272 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
273 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
274 Gb == HexagonII::HSIG_A);
275 case HexagonII::HSIG_A:
276 return (Gb == HexagonII::HSIG_A);
277 case HexagonII::HSIG_Compound:
278 return (Gb == HexagonII::HSIG_Compound);
279 }
280 return false;
281}
282
283/// isLoadFromStackSlot - If the specified machine instruction is a direct
284/// load from a stack slot, return the virtual or physical register number of
285/// the destination along with the FrameIndex of the loaded stack slot. If
286/// not, return 0. This predicate must return 0 if the instruction has
287/// any side effects other than loading from the stack slot.
288unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
289 int &FrameIndex) const {
290 switch (MI.getOpcode()) {
291 default:
292 break;
293 case Hexagon::L2_loadri_io:
294 case Hexagon::L2_loadrd_io:
295 case Hexagon::V6_vL32b_ai:
296 case Hexagon::V6_vL32b_nt_ai:
297 case Hexagon::V6_vL32Ub_ai:
298 case Hexagon::LDriw_pred:
299 case Hexagon::LDriw_ctr:
300 case Hexagon::PS_vloadrq_ai:
301 case Hexagon::PS_vloadrw_ai:
302 case Hexagon::PS_vloadrw_nt_ai: {
303 const MachineOperand OpFI = MI.getOperand(1);
304 if (!OpFI.isFI())
305 return 0;
306 const MachineOperand OpOff = MI.getOperand(2);
307 if (!OpOff.isImm() || OpOff.getImm() != 0)
308 return 0;
309 FrameIndex = OpFI.getIndex();
310 return MI.getOperand(0).getReg();
311 }
312
313 case Hexagon::L2_ploadrit_io:
314 case Hexagon::L2_ploadrif_io:
315 case Hexagon::L2_ploadrdt_io:
316 case Hexagon::L2_ploadrdf_io: {
317 const MachineOperand OpFI = MI.getOperand(2);
318 if (!OpFI.isFI())
319 return 0;
320 const MachineOperand OpOff = MI.getOperand(3);
321 if (!OpOff.isImm() || OpOff.getImm() != 0)
322 return 0;
323 FrameIndex = OpFI.getIndex();
324 return MI.getOperand(0).getReg();
325 }
326 }
327
328 return 0;
329}
330
331/// isStoreToStackSlot - If the specified machine instruction is a direct
332/// store to a stack slot, return the virtual or physical register number of
333/// the source reg along with the FrameIndex of the stored stack slot. If
334/// not, return 0. This predicate must return 0 if the instruction has
335/// any side effects other than storing to the stack slot.
336unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
337 int &FrameIndex) const {
338 switch (MI.getOpcode()) {
339 default:
340 break;
341 case Hexagon::S2_storerb_io:
342 case Hexagon::S2_storerh_io:
343 case Hexagon::S2_storeri_io:
344 case Hexagon::S2_storerd_io:
345 case Hexagon::V6_vS32b_ai:
346 case Hexagon::V6_vS32Ub_ai:
347 case Hexagon::STriw_pred:
348 case Hexagon::STriw_ctr:
349 case Hexagon::PS_vstorerq_ai:
350 case Hexagon::PS_vstorerw_ai: {
351 const MachineOperand &OpFI = MI.getOperand(0);
352 if (!OpFI.isFI())
353 return 0;
354 const MachineOperand &OpOff = MI.getOperand(1);
355 if (!OpOff.isImm() || OpOff.getImm() != 0)
356 return 0;
357 FrameIndex = OpFI.getIndex();
358 return MI.getOperand(2).getReg();
359 }
360
361 case Hexagon::S2_pstorerbt_io:
362 case Hexagon::S2_pstorerbf_io:
363 case Hexagon::S2_pstorerht_io:
364 case Hexagon::S2_pstorerhf_io:
365 case Hexagon::S2_pstorerit_io:
366 case Hexagon::S2_pstorerif_io:
367 case Hexagon::S2_pstorerdt_io:
368 case Hexagon::S2_pstorerdf_io: {
369 const MachineOperand &OpFI = MI.getOperand(1);
370 if (!OpFI.isFI())
371 return 0;
372 const MachineOperand &OpOff = MI.getOperand(2);
373 if (!OpOff.isImm() || OpOff.getImm() != 0)
374 return 0;
375 FrameIndex = OpFI.getIndex();
376 return MI.getOperand(3).getReg();
377 }
378 }
379
380 return 0;
381}
382
383/// This function checks if the instruction or bundle of instructions
384/// has load from stack slot and returns frameindex and machine memory
385/// operand of that instruction if true.
386bool HexagonInstrInfo::hasLoadFromStackSlot(
387 const MachineInstr &MI,
388 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
389 if (MI.isBundle()) {
390 const MachineBasicBlock *MBB = MI.getParent();
391 MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
392 for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
393 if (TargetInstrInfo::hasLoadFromStackSlot(*MII, Accesses))
394 return true;
395 return false;
396 }
397
398 return TargetInstrInfo::hasLoadFromStackSlot(MI, Accesses);
399}
400
401/// This function checks if the instruction or bundle of instructions
402/// has store to stack slot and returns frameindex and machine memory
403/// operand of that instruction if true.
404bool HexagonInstrInfo::hasStoreToStackSlot(
405 const MachineInstr &MI,
406 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
407 if (MI.isBundle()) {
408 const MachineBasicBlock *MBB = MI.getParent();
409 MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
410 for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
411 if (TargetInstrInfo::hasStoreToStackSlot(*MII, Accesses))
412 return true;
413 return false;
414 }
415
416 return TargetInstrInfo::hasStoreToStackSlot(MI, Accesses);
417}
418
419/// This function can analyze one/two way branching only and should (mostly) be
420/// called from the target-independent side.
421/// First entry is always the opcode of the branching instruction, except when
422/// the Cond vector is supposed to be empty, e.g., when analyzeBranch fails or for a
423/// BB with only an unconditional jump. Subsequent entries depend upon the opcode,
424/// e.g. Jump_c p will have
425/// Cond[0] = Jump_c
426/// Cond[1] = p
427/// HW-loop ENDLOOP:
428/// Cond[0] = ENDLOOP
429/// Cond[1] = MBB
430/// New value jump:
431/// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
432/// Cond[1] = R
433/// Cond[2] = Imm
434bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
435 MachineBasicBlock *&TBB,
436 MachineBasicBlock *&FBB,
437 SmallVectorImpl<MachineOperand> &Cond,
438 bool AllowModify) const {
439 TBB = nullptr;
440 FBB = nullptr;
441 Cond.clear();
442
443 // If the block has no terminators, it just falls into the block after it.
444 MachineBasicBlock::instr_iterator I = MBB.instr_end();
445 if (I == MBB.instr_begin())
446 return false;
447
448 // A basic block may look like this:
449 //
450 // [ insn
451 // EH_LABEL
452 // insn
453 // insn
454 // insn
455 // EH_LABEL
456 // insn ]
457 //
458 // It has two succs but does not have a terminator
459 // Don't know how to handle it.
460 do {
461 --I;
462 if (I->isEHLabel())
463 // Don't analyze EH branches.
464 return true;
465 } while (I != MBB.instr_begin());
466
467 I = MBB.instr_end();
468 --I;
469
470 while (I->isDebugInstr()) {
471 if (I == MBB.instr_begin())
472 return false;
473 --I;
474 }
475
476 bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
477 I->getOperand(0).isMBB();
478 // Delete the J2_jump if it's equivalent to a fall-through.
479 if (AllowModify && JumpToBlock &&
480 MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
481 LLVM_DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
482 I->eraseFromParent();
483 I = MBB.instr_end();
484 if (I == MBB.instr_begin())
485 return false;
486 --I;
487 }
488 if (!isUnpredicatedTerminator(*I))
489 return false;
490
491 // Get the last instruction in the block.
492 MachineInstr *LastInst = &*I;
493 MachineInstr *SecondLastInst = nullptr;
494 // Find one more terminator if present.
495 while (true) {
496 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
497 if (!SecondLastInst)
498 SecondLastInst = &*I;
499 else
500 // This is a third branch.
501 return true;
502 }
503 if (I == MBB.instr_begin())
504 break;
505 --I;
506 }
507
508 int LastOpcode = LastInst->getOpcode();
509 int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
510 // If the branch target is not a basic block, it could be a tail call.
511 // (It is, if the target is a function.)
512 if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
513 return true;
514 if (SecLastOpcode == Hexagon::J2_jump &&
515 !SecondLastInst->getOperand(0).isMBB())
516 return true;
517
518 bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
519 bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
520
521 if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
522 return true;
523
524 // If there is only one terminator instruction, process it.
525 if (LastInst && !SecondLastInst) {
526 if (LastOpcode == Hexagon::J2_jump) {
527 TBB = LastInst->getOperand(0).getMBB();
528 return false;
529 }
530 if (isEndLoopN(LastOpcode)) {
531 TBB = LastInst->getOperand(0).getMBB();
532 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
533 Cond.push_back(LastInst->getOperand(0));
534 return false;
535 }
536 if (LastOpcodeHasJMP_c) {
537 TBB = LastInst->getOperand(1).getMBB();
538 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
539 Cond.push_back(LastInst->getOperand(0));
540 return false;
541 }
542 // Only supporting rr/ri versions of new-value jumps.
543 if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
544 TBB = LastInst->getOperand(2).getMBB();
545 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
546 Cond.push_back(LastInst->getOperand(0));
547 Cond.push_back(LastInst->getOperand(1));
548 return false;
549 }
550 LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
551 << " with one jump\n";);
552 // Otherwise, don't know what this is.
553 return true;
554 }
555
556 bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
557 bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
558 if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
559 if (!SecondLastInst->getOperand(1).isMBB())
560 return true;
561 TBB = SecondLastInst->getOperand(1).getMBB();
562 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
563 Cond.push_back(SecondLastInst->getOperand(0));
564 FBB = LastInst->getOperand(0).getMBB();
565 return false;
566 }
567
568 // Only supporting rr/ri versions of new-value jumps.
569 if (SecLastOpcodeHasNVJump &&
570 (SecondLastInst->getNumExplicitOperands() == 3) &&
571 (LastOpcode == Hexagon::J2_jump)) {
572 TBB = SecondLastInst->getOperand(2).getMBB();
573 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
574 Cond.push_back(SecondLastInst->getOperand(0));
575 Cond.push_back(SecondLastInst->getOperand(1));
576 FBB = LastInst->getOperand(0).getMBB();
577 return false;
578 }
579
580 // If the block ends with two Hexagon:JMPs, handle it. The second one is not
581 // executed, so remove it.
582 if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
583 TBB = SecondLastInst->getOperand(0).getMBB();
584 I = LastInst->getIterator();
585 if (AllowModify)
586 I->eraseFromParent();
587 return false;
588 }
589
590 // If the block ends with an ENDLOOP, and J2_jump, handle it.
591 if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
592 TBB = SecondLastInst->getOperand(0).getMBB();
593 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
594 Cond.push_back(SecondLastInst->getOperand(0));
595 FBB = LastInst->getOperand(0).getMBB();
596 return false;
597 }
598 LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
599 << " with two jumps";);
600 // Otherwise, can't handle this.
601 return true;
602}
603
604unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
605 int *BytesRemoved) const {
606 assert(!BytesRemoved && "code size not handled");
607
608 LLVM_DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
609 MachineBasicBlock::iterator I = MBB.end();
610 unsigned Count = 0;
611 while (I != MBB.begin()) {
612 --I;
613 if (I->isDebugInstr())
614 continue;
615 // Only removing branches from end of MBB.
616 if (!I->isBranch())
617 return Count;
618 if (Count && (I->getOpcode() == Hexagon::J2_jump))
619 llvm_unreachable("Malformed basic block: unconditional branch not last")::llvm::llvm_unreachable_internal("Malformed basic block: unconditional branch not last"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 619)
;
620 MBB.erase(&MBB.back());
621 I = MBB.end();
622 ++Count;
623 }
624 return Count;
625}
626
627unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
628 MachineBasicBlock *TBB,
629 MachineBasicBlock *FBB,
630 ArrayRef<MachineOperand> Cond,
631 const DebugLoc &DL,
632 int *BytesAdded) const {
633 unsigned BOpc = Hexagon::J2_jump;
634 unsigned BccOpc = Hexagon::J2_jumpt;
635 assert(validateBranchCond(Cond) && "Invalid branching condition");
636 assert(TBB && "insertBranch must not be told to insert a fallthrough");
637 assert(!BytesAdded && "code size not handled");
638
639 // Check if reverseBranchCondition has asked to reverse this branch
640 // If we want to reverse the branch an odd number of times, we want
641 // J2_jumpf.
642 if (!Cond.empty() && Cond[0].isImm())
643 BccOpc = Cond[0].getImm();
644
645 if (!FBB) {
646 if (Cond.empty()) {
647 // Due to a bug in TailMerging/CFG Optimization, we need to add a
648 // special case handling of a predicated jump followed by an
649 // unconditional jump. If not, Tail Merging and CFG Optimization go
650 // into an infinite loop.
651 MachineBasicBlock *NewTBB, *NewFBB;
652 SmallVector<MachineOperand, 4> Cond;
653 auto Term = MBB.getFirstTerminator();
654 if (Term != MBB.end() && isPredicated(*Term) &&
655 !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
656 MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
657 reverseBranchCondition(Cond);
658 removeBranch(MBB);
659 return insertBranch(MBB, TBB, nullptr, Cond, DL);
660 }
661 BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
662 } else if (isEndLoopN(Cond[0].getImm())) {
663 int EndLoopOp = Cond[0].getImm();
664 assert(Cond[1].isMBB());
665 // Since we're adding an ENDLOOP, there better be a LOOP instruction.
666 // Check for it, and change the BB target if needed.
667 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
668 MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
669 VisitedBBs);
670 assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
671 Loop->getOperand(0).setMBB(TBB);
672 // Add the ENDLOOP after finding the LOOP0.
673 BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
674 } else if (isNewValueJump(Cond[0].getImm())) {
675 assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
676 // New value jump
677 // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
678 // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
679 unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
680 LLVM_DEBUG(dbgs() << "\nInserting NVJump for "
681 << printMBBReference(MBB););
682 if (Cond[2].isReg()) {
683 unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
684 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
685 addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
686 } else if(Cond[2].isImm()) {
687 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
688 addImm(Cond[2].getImm()).addMBB(TBB);
689 } else
690 llvm_unreachable("Invalid condition for branching")::llvm::llvm_unreachable_internal("Invalid condition for branching"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 690)
;
691 } else {
692 assert((Cond.size() == 2) && "Malformed cond vector")(static_cast <bool> ((Cond.size() == 2) && "Malformed cond vector"
) ? void (0) : __assert_fail ("(Cond.size() == 2) && \"Malformed cond vector\""
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 692, __extension__
__PRETTY_FUNCTION__))
;
693 const MachineOperand &RO = Cond[1];
694 unsigned Flags = getUndefRegState(RO.isUndef());
695 BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
696 }
697 return 1;
698 }
699 assert((!Cond.empty()) &&
700 "Cond. cannot be empty when multiple branchings are required");
701 assert((!isNewValueJump(Cond[0].getImm())) &&
702 "NV-jump cannot be inserted with another branch");
703 // Special case for hardware loops. The condition is a basic block.
704 if (isEndLoopN(Cond[0].getImm())) {
705 int EndLoopOp = Cond[0].getImm();
706 assert(Cond[1].isMBB());
707 // Since we're adding an ENDLOOP, there better be a LOOP instruction.
708 // Check for it, and change the BB target if needed.
709 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
710 MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
711 VisitedBBs);
712 assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
713 Loop->getOperand(0).setMBB(TBB);
714 // Add the ENDLOOP after finding the LOOP0.
715 BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
716 } else {
717 const MachineOperand &RO = Cond[1];
718 unsigned Flags = getUndefRegState(RO.isUndef());
719 BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
720 }
721 BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
722
723 return 2;
724}
725
726namespace {
727class HexagonPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
728 MachineInstr *Loop, *EndLoop;
729 MachineFunction *MF;
730 const HexagonInstrInfo *TII;
731 int64_t TripCount;
732 Register LoopCount;
733 DebugLoc DL;
734
735public:
736 HexagonPipelinerLoopInfo(MachineInstr *Loop, MachineInstr *EndLoop)
737 : Loop(Loop), EndLoop(EndLoop), MF(Loop->getParent()->getParent()),
738 TII(MF->getSubtarget<HexagonSubtarget>().getInstrInfo()),
739 DL(Loop->getDebugLoc()) {
740 // Inspect the Loop instruction up-front, as it may be deleted when we call
741 // createTripCountGreaterCondition.
742 TripCount = Loop->getOpcode() == Hexagon::J2_loop0r
743 ? -1
744 : Loop->getOperand(1).getImm();
745 if (TripCount == -1)
746 LoopCount = Loop->getOperand(1).getReg();
747 }
748
749 bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
750 // Only ignore the terminator.
751 return MI == EndLoop;
752 }
753
754 std::optional<bool> createTripCountGreaterCondition(
755 int TC, MachineBasicBlock &MBB,
756 SmallVectorImpl<MachineOperand> &Cond) override {
757 if (TripCount == -1) {
758 // Check if we're done with the loop.
759 Register Done = TII->createVR(MF, MVT::i1);
760 MachineInstr *NewCmp = BuildMI(&MBB, DL,
761 TII->get(Hexagon::C2_cmpgtui), Done)
762 .addReg(LoopCount)
763 .addImm(TC);
764 Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
765 Cond.push_back(NewCmp->getOperand(0));
766 return {};
767 }
768
769 return TripCount > TC;
770 }
771
772 void setPreheader(MachineBasicBlock *NewPreheader) override {
773 NewPreheader->splice(NewPreheader->getFirstTerminator(), Loop->getParent(),
774 Loop);
775 }
776
777 void adjustTripCount(int TripCountAdjust) override {
778 // If the loop trip count is a compile-time value, then just change the
779 // value.
780 if (Loop->getOpcode() == Hexagon::J2_loop0i ||
781 Loop->getOpcode() == Hexagon::J2_loop1i) {
782 int64_t TripCount = Loop->getOperand(1).getImm() + TripCountAdjust;
783 assert(TripCount > 0 && "Can't create an empty or negative loop!");
784 Loop->getOperand(1).setImm(TripCount);
785 return;
786 }
787
788 // The loop trip count is a run-time value. We generate code to subtract
789 // one from the trip count, and update the loop instruction.
790 Register LoopCount = Loop->getOperand(1).getReg();
791 Register NewLoopCount = TII->createVR(MF, MVT::i32);
792 BuildMI(*Loop->getParent(), Loop, Loop->getDebugLoc(),
793 TII->get(Hexagon::A2_addi), NewLoopCount)
794 .addReg(LoopCount)
795 .addImm(TripCountAdjust);
796 Loop->getOperand(1).setReg(NewLoopCount);
797 }
798
799 void disposed() override { Loop->eraseFromParent(); }
800};
801} // namespace
802
803std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
804HexagonInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
805 // We really "analyze" only hardware loops right now.
806 MachineBasicBlock::iterator I = LoopBB->getFirstTerminator();
807
808 if (I != LoopBB->end() && isEndLoopN(I->getOpcode())) {
809 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
810 MachineInstr *LoopInst = findLoopInstr(
811 LoopBB, I->getOpcode(), I->getOperand(0).getMBB(), VisitedBBs);
812 if (LoopInst)
813 return std::make_unique<HexagonPipelinerLoopInfo>(LoopInst, &*I);
814 }
815 return nullptr;
816}
817
818bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
819 unsigned NumCycles, unsigned ExtraPredCycles,
820 BranchProbability Probability) const {
821 return nonDbgBBSize(&MBB) <= 3;
822}
823
824bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
825 unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
826 unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
827 const {
828 return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
829}
830
831bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
832 unsigned NumInstrs, BranchProbability Probability) const {
833 return NumInstrs <= 4;
834}
835
836static void getLiveInRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
837 SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
838 const MachineBasicBlock &B = *MI.getParent();
839 Regs.addLiveIns(B);
840 auto E = MachineBasicBlock::const_iterator(MI.getIterator());
841 for (auto I = B.begin(); I != E; ++I) {
842 Clobbers.clear();
843 Regs.stepForward(*I, Clobbers);
844 }
845}
846
847static void getLiveOutRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
848 const MachineBasicBlock &B = *MI.getParent();
849 Regs.addLiveOuts(B);
850 auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
851 for (auto I = B.rbegin(); I != E; ++I)
852 Regs.stepBackward(*I);
853}
854
855void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
856 MachineBasicBlock::iterator I,
857 const DebugLoc &DL, MCRegister DestReg,
858 MCRegister SrcReg, bool KillSrc) const {
859 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
860 unsigned KillFlag = getKillRegState(KillSrc);
861
862 if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
863 BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
864 .addReg(SrcReg, KillFlag);
865 return;
866 }
867 if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
868 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
869 .addReg(SrcReg, KillFlag);
870 return;
871 }
872 if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
873 // Map Pd = Ps to Pd = or(Ps, Ps).
874 BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
875 .addReg(SrcReg).addReg(SrcReg, KillFlag);
876 return;
877 }
878 if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
879 Hexagon::IntRegsRegClass.contains(SrcReg)) {
880 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
881 .addReg(SrcReg, KillFlag);
882 return;
883 }
884 if (Hexagon::IntRegsRegClass.contains(DestReg) &&
885 Hexagon::CtrRegsRegClass.contains(SrcReg)) {
886 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
887 .addReg(SrcReg, KillFlag);
888 return;
889 }
890 if (Hexagon::ModRegsRegClass.contains(DestReg) &&
891 Hexagon::IntRegsRegClass.contains(SrcReg)) {
892 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
893 .addReg(SrcReg, KillFlag);
894 return;
895 }
896 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
897 Hexagon::IntRegsRegClass.contains(DestReg)) {
898 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
899 .addReg(SrcReg, KillFlag);
900 return;
901 }
902 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
903 Hexagon::PredRegsRegClass.contains(DestReg)) {
904 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
905 .addReg(SrcReg, KillFlag);
906 return;
907 }
908 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
909 Hexagon::IntRegsRegClass.contains(DestReg)) {
910 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
911 .addReg(SrcReg, KillFlag);
912 return;
913 }
914 if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
915 BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
916 addReg(SrcReg, KillFlag);
917 return;
918 }
919 if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
920 LivePhysRegs LiveAtMI(HRI);
921 getLiveInRegsAt(LiveAtMI, *I);
922 Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
923 Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
924 unsigned UndefLo = getUndefRegState(!LiveAtMI.contains(SrcLo));
925 unsigned UndefHi = getUndefRegState(!LiveAtMI.contains(SrcHi));
926 BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
927 .addReg(SrcHi, KillFlag | UndefHi)
928 .addReg(SrcLo, KillFlag | UndefLo);
929 return;
930 }
931 if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
932 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
933 .addReg(SrcReg)
934 .addReg(SrcReg, KillFlag);
935 return;
936 }
937 if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
938 Hexagon::HvxVRRegClass.contains(DestReg)) {
939 llvm_unreachable("Unimplemented pred to vec")::llvm::llvm_unreachable_internal("Unimplemented pred to vec"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 939)
;
940 return;
941 }
942 if (Hexagon::HvxQRRegClass.contains(DestReg) &&
943 Hexagon::HvxVRRegClass.contains(SrcReg)) {
944 llvm_unreachable("Unimplemented vec to pred")::llvm::llvm_unreachable_internal("Unimplemented vec to pred"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 944)
;
945 return;
946 }
947
948#ifndef NDEBUG
949 // Show the invalid registers to ease debugging.
950 dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": "
951 << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n';
952#endif
953 llvm_unreachable("Unimplemented")::llvm::llvm_unreachable_internal("Unimplemented", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 953)
;
954}
955
956void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
957 MachineBasicBlock::iterator I,
958 Register SrcReg, bool isKill, int FI,
959 const TargetRegisterClass *RC,
960 const TargetRegisterInfo *TRI,
961 Register VReg) const {
962 DebugLoc DL = MBB.findDebugLoc(I);
963 MachineFunction &MF = *MBB.getParent();
964 MachineFrameInfo &MFI = MF.getFrameInfo();
965 unsigned KillFlag = getKillRegState(isKill);
966
967 MachineMemOperand *MMO = MF.getMachineMemOperand(
968 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
969 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
970
971 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
972 BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
973 .addFrameIndex(FI).addImm(0)
974 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
975 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
976 BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
977 .addFrameIndex(FI).addImm(0)
978 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
979 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
980 BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
981 .addFrameIndex(FI).addImm(0)
982 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
983 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
984 BuildMI(MBB, I, DL, get(Hexagon::STriw_ctr))
985 .addFrameIndex(FI).addImm(0)
986 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
987 } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
988 BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
989 .addFrameIndex(FI).addImm(0)
990 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
991 } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
992 BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerv_ai))
993 .addFrameIndex(FI).addImm(0)
994 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
995 } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
996 BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerw_ai))
997 .addFrameIndex(FI).addImm(0)
998 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
999 } else {
1000 llvm_unreachable("Unimplemented")::llvm::llvm_unreachable_internal("Unimplemented", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 1000)
;
1001 }
1002}
1003
1004void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1005 MachineBasicBlock::iterator I,
1006 Register DestReg, int FI,
1007 const TargetRegisterClass *RC,
1008 const TargetRegisterInfo *TRI,
1009 Register VReg) const {
1010 DebugLoc DL = MBB.findDebugLoc(I);
1011 MachineFunction &MF = *MBB.getParent();
1012 MachineFrameInfo &MFI = MF.getFrameInfo();
1013
1014 MachineMemOperand *MMO = MF.getMachineMemOperand(
1015 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1016 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
1017
1018 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
1019 BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
1020 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1021 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
1022 BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
1023 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1024 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
1025 BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
1026 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1027 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
1028 BuildMI(MBB, I, DL, get(Hexagon::LDriw_ctr), DestReg)
1029 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1030 } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
1031 BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
1032 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1033 } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
1034 BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrv_ai), DestReg)
1035 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1036 } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1037 BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrw_ai), DestReg)
1038 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1039 } else {
1040 llvm_unreachable("Can't store this register to stack slot")::llvm::llvm_unreachable_internal("Can't store this register to stack slot"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 1040)
;
1041 }
1042}
1043
1044/// expandPostRAPseudo - This function is called for all pseudo instructions
1045/// that remain after register allocation. Many pseudo instructions are
1046/// created to help register allocation. This is the place to convert them
1047/// into real instructions. The target can edit MI in place, or it can insert
1048/// new instructions and erase MI. The function should return true if
1049/// anything was changed.
1050bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1051 MachineBasicBlock &MBB = *MI.getParent();
1052 MachineFunction &MF = *MBB.getParent();
1053 MachineRegisterInfo &MRI = MF.getRegInfo();
1054 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1055 LivePhysRegs LiveIn(HRI), LiveOut(HRI);
1056 DebugLoc DL = MI.getDebugLoc();
1057 unsigned Opc = MI.getOpcode();
1058
1059 auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {
1060 Register Mx = MI.getOperand(MxOp).getReg();
1061 Register CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
1062 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx)
1063 .add(MI.getOperand((HasImm ? 5 : 4)));
1064 auto MIB = BuildMI(MBB, MI, DL, get(Opc)).add(MI.getOperand(0))
1065 .add(MI.getOperand(1)).add(MI.getOperand(2)).add(MI.getOperand(3));
1066 if (HasImm)
1067 MIB.add(MI.getOperand(4));
1068 MIB.addReg(CSx, RegState::Implicit);
1069 MBB.erase(MI);
1070 return true;
1071 };
1072
1073 auto UseAligned = [&](const MachineInstr &MI, Align NeedAlign) {
1074 if (MI.memoperands().empty())
1075 return false;
1076 return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
1077 return MMO->getAlign() >= NeedAlign;
1078 });
1079 };
1080
1081 switch (Opc) {
1
Control jumps to 'case PS_call_instrprof_custom:' at line 1082
1082 case Hexagon::PS_call_instrprof_custom: {
1083 auto Op0 = MI.getOperand(0);
1084 assert(Op0.isGlobal() &&
1085 "First operand must be a global containing handler name.");
2
'?' condition is true
1086 const GlobalValue *NameVar = Op0.getGlobal();
1087 const GlobalVariable *GV = dyn_cast<GlobalVariable>(NameVar);
3
Assuming 'NameVar' is not a 'CastReturnType'
4
'GV' initialized to a null pointer value
1088 auto *Arr = cast<ConstantDataArray>(GV->getInitializer());
5
Called C++ object pointer is null
1089 StringRef NameStr = Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
1090
1091 MachineOperand &Op1 = MI.getOperand(1);
1092 // Set R0 with the imm value to be passed to the custom profiling handler.
1093 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrsi), Hexagon::R0)
1094 .addImm(Op1.getImm());
1095 // The call to the custom handler is being treated as a special one as the
1096 // callee is responsible for saving and restoring all the registers
1097 // (including caller saved registers) it needs to modify. This is
1098 // done to reduce the impact of instrumentation on the code being
1099 // instrumented/profiled.
1100 // NOTE: R14, R15 and R28 are reserved for PLT handling. These registers
1101 // are in the Def list of the Hexagon::PS_call_instrprof_custom and
1102 // therefore will be handled appropriately during register allocation.
1103
1104 // TODO: It may be a good idea to add a separate pseudo instruction for
1105 // static relocation which doesn't need to reserve r14, r15 and r28.
1106
1107 auto MIB = BuildMI(MBB, MI, DL, get(Hexagon::J2_call))
1108 .addUse(Hexagon::R0, RegState::Implicit|RegState::InternalRead)
1109 .addDef(Hexagon::R29, RegState::ImplicitDefine)
1110 .addDef(Hexagon::R30, RegState::ImplicitDefine)
1111 .addDef(Hexagon::R14, RegState::ImplicitDefine)
1112 .addDef(Hexagon::R15, RegState::ImplicitDefine)
1113 .addDef(Hexagon::R28, RegState::ImplicitDefine);
1114 const char *cstr = MF.createExternalSymbolName(NameStr);
1115 MIB.addExternalSymbol(cstr);
1116 MBB.erase(MI);
1117 return true;
1118 }
1119 case TargetOpcode::COPY: {
1120 MachineOperand &MD = MI.getOperand(0);
1121 MachineOperand &MS = MI.getOperand(1);
1122 MachineBasicBlock::iterator MBBI = MI.getIterator();
1123 if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
1124 copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
1125 std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
1126 }
1127 MBB.erase(MBBI);
1128 return true;
1129 }
1130 case Hexagon::PS_aligna:
1131 BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
1132 .addReg(HRI.getFrameRegister())
1133 .addImm(-MI.getOperand(1).getImm());
1134 MBB.erase(MI);
1135 return true;
1136 case Hexagon::V6_vassignp: {
1137 Register SrcReg = MI.getOperand(1).getReg();
1138 Register DstReg = MI.getOperand(0).getReg();
1139 Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1140 Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1141 getLiveInRegsAt(LiveIn, MI);
1142 unsigned UndefLo = getUndefRegState(!LiveIn.contains(SrcLo));
1143 unsigned UndefHi = getUndefRegState(!LiveIn.contains(SrcHi));
1144 unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1145 BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1146 .addReg(SrcHi, UndefHi)
1147 .addReg(SrcLo, Kill | UndefLo);
1148 MBB.erase(MI);
1149 return true;
1150 }
1151 case Hexagon::V6_lo: {
1152 Register SrcReg = MI.getOperand(1).getReg();
1153 Register DstReg = MI.getOperand(0).getReg();
1154 Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1155 copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1156 MBB.erase(MI);
1157 MRI.clearKillFlags(SrcSubLo);
1158 return true;
1159 }
1160 case Hexagon::V6_hi: {
1161 Register SrcReg = MI.getOperand(1).getReg();
1162 Register DstReg = MI.getOperand(0).getReg();
1163 Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1164 copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1165 MBB.erase(MI);
1166 MRI.clearKillFlags(SrcSubHi);
1167 return true;
1168 }
1169 case Hexagon::PS_vloadrv_ai: {
1170 Register DstReg = MI.getOperand(0).getReg();
1171 const MachineOperand &BaseOp = MI.getOperand(1);
1172 assert(BaseOp.getSubReg() == 0);
1173 int Offset = MI.getOperand(2).getImm();
1174 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1175 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1176 : Hexagon::V6_vL32Ub_ai;
1177 BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
1178 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1179 .addImm(Offset)
1180 .cloneMemRefs(MI);
1181 MBB.erase(MI);
1182 return true;
1183 }
1184 case Hexagon::PS_vloadrw_ai: {
1185 Register DstReg = MI.getOperand(0).getReg();
1186 const MachineOperand &BaseOp = MI.getOperand(1);
1187 assert(BaseOp.getSubReg() == 0);
1188 int Offset = MI.getOperand(2).getImm();
1189 unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1190 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1191 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1192 : Hexagon::V6_vL32Ub_ai;
1193 BuildMI(MBB, MI, DL, get(NewOpc),
1194 HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1195 .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1196 .addImm(Offset)
1197 .cloneMemRefs(MI);
1198 BuildMI(MBB, MI, DL, get(NewOpc),
1199 HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1200 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1201 .addImm(Offset + VecOffset)
1202 .cloneMemRefs(MI);
1203 MBB.erase(MI);
1204 return true;
1205 }
1206 case Hexagon::PS_vstorerv_ai: {
1207 const MachineOperand &SrcOp = MI.getOperand(2);
1208 assert(SrcOp.getSubReg() == 0);
1209 const MachineOperand &BaseOp = MI.getOperand(0);
1210 assert(BaseOp.getSubReg() == 0);
1211 int Offset = MI.getOperand(1).getImm();
1212 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1213 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1214 : Hexagon::V6_vS32Ub_ai;
1215 BuildMI(MBB, MI, DL, get(NewOpc))
1216 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1217 .addImm(Offset)
1218 .addReg(SrcOp.getReg(), getRegState(SrcOp))
1219 .cloneMemRefs(MI);
1220 MBB.erase(MI);
1221 return true;
1222 }
1223 case Hexagon::PS_vstorerw_ai: {
1224 Register SrcReg = MI.getOperand(2).getReg();
1225 const MachineOperand &BaseOp = MI.getOperand(0);
1226 assert(BaseOp.getSubReg() == 0);
1227 int Offset = MI.getOperand(1).getImm();
1228 unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1229 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1230 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1231 : Hexagon::V6_vS32Ub_ai;
1232 BuildMI(MBB, MI, DL, get(NewOpc))
1233 .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1234 .addImm(Offset)
1235 .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo))
1236 .cloneMemRefs(MI);
1237 BuildMI(MBB, MI, DL, get(NewOpc))
1238 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1239 .addImm(Offset + VecOffset)
1240 .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi))
1241 .cloneMemRefs(MI);
1242 MBB.erase(MI);
1243 return true;
1244 }
1245 case Hexagon::PS_true: {
1246 Register Reg = MI.getOperand(0).getReg();
1247 BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1248 .addReg(Reg, RegState::Undef)
1249 .addReg(Reg, RegState::Undef);
1250 MBB.erase(MI);
1251 return true;
1252 }
1253 case Hexagon::PS_false: {
1254 Register Reg = MI.getOperand(0).getReg();
1255 BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1256 .addReg(Reg, RegState::Undef)
1257 .addReg(Reg, RegState::Undef);
1258 MBB.erase(MI);
1259 return true;
1260 }
1261 case Hexagon::PS_qtrue: {
1262 BuildMI(MBB, MI, DL, get(Hexagon::V6_veqw), MI.getOperand(0).getReg())
1263 .addReg(Hexagon::V0, RegState::Undef)
1264 .addReg(Hexagon::V0, RegState::Undef);
1265 MBB.erase(MI);
1266 return true;
1267 }
1268 case Hexagon::PS_qfalse: {
1269 BuildMI(MBB, MI, DL, get(Hexagon::V6_vgtw), MI.getOperand(0).getReg())
1270 .addReg(Hexagon::V0, RegState::Undef)
1271 .addReg(Hexagon::V0, RegState::Undef);
1272 MBB.erase(MI);
1273 return true;
1274 }
1275 case Hexagon::PS_vdd0: {
1276 Register Vd = MI.getOperand(0).getReg();
1277 BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd)
1278 .addReg(Vd, RegState::Undef)
1279 .addReg(Vd, RegState::Undef);
1280 MBB.erase(MI);
1281 return true;
1282 }
1283 case Hexagon::PS_vmulw: {
1284 // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1285 Register DstReg = MI.getOperand(0).getReg();
1286 Register Src1Reg = MI.getOperand(1).getReg();
1287 Register Src2Reg = MI.getOperand(2).getReg();
1288 Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1289 Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1290 Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1291 Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1292 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1293 HRI.getSubReg(DstReg, Hexagon::isub_hi))
1294 .addReg(Src1SubHi)
1295 .addReg(Src2SubHi);
1296 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1297 HRI.getSubReg(DstReg, Hexagon::isub_lo))
1298 .addReg(Src1SubLo)
1299 .addReg(Src2SubLo);
1300 MBB.erase(MI);
1301 MRI.clearKillFlags(Src1SubHi);
1302 MRI.clearKillFlags(Src1SubLo);
1303 MRI.clearKillFlags(Src2SubHi);
1304 MRI.clearKillFlags(Src2SubLo);
1305 return true;
1306 }
1307 case Hexagon::PS_vmulw_acc: {
1308 // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1309 Register DstReg = MI.getOperand(0).getReg();
1310 Register Src1Reg = MI.getOperand(1).getReg();
1311 Register Src2Reg = MI.getOperand(2).getReg();
1312 Register Src3Reg = MI.getOperand(3).getReg();
1313 Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1314 Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1315 Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1316 Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1317 Register Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1318 Register Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1319 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1320 HRI.getSubReg(DstReg, Hexagon::isub_hi))
1321 .addReg(Src1SubHi)
1322 .addReg(Src2SubHi)
1323 .addReg(Src3SubHi);
1324 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1325 HRI.getSubReg(DstReg, Hexagon::isub_lo))
1326 .addReg(Src1SubLo)
1327 .addReg(Src2SubLo)
1328 .addReg(Src3SubLo);
1329 MBB.erase(MI);
1330 MRI.clearKillFlags(Src1SubHi);
1331 MRI.clearKillFlags(Src1SubLo);
1332 MRI.clearKillFlags(Src2SubHi);
1333 MRI.clearKillFlags(Src2SubLo);
1334 MRI.clearKillFlags(Src3SubHi);
1335 MRI.clearKillFlags(Src3SubLo);
1336 return true;
1337 }
1338 case Hexagon::PS_pselect: {
1339 const MachineOperand &Op0 = MI.getOperand(0);
1340 const MachineOperand &Op1 = MI.getOperand(1);
1341 const MachineOperand &Op2 = MI.getOperand(2);
1342 const MachineOperand &Op3 = MI.getOperand(3);
1343 Register Rd = Op0.getReg();
1344 Register Pu = Op1.getReg();
1345 Register Rs = Op2.getReg();
1346 Register Rt = Op3.getReg();
1347 DebugLoc DL = MI.getDebugLoc();
1348 unsigned K1 = getKillRegState(Op1.isKill());
1349 unsigned K2 = getKillRegState(Op2.isKill());
1350 unsigned K3 = getKillRegState(Op3.isKill());
1351 if (Rd != Rs)
1352 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1353 .addReg(Pu, (Rd == Rt) ? K1 : 0)
1354 .addReg(Rs, K2);
1355 if (Rd != Rt)
1356 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1357 .addReg(Pu, K1)
1358 .addReg(Rt, K3);
1359 MBB.erase(MI);
1360 return true;
1361 }
1362 case Hexagon::PS_vselect: {
1363 const MachineOperand &Op0 = MI.getOperand(0);
1364 const MachineOperand &Op1 = MI.getOperand(1);
1365 const MachineOperand &Op2 = MI.getOperand(2);
1366 const MachineOperand &Op3 = MI.getOperand(3);
1367 getLiveOutRegsAt(LiveOut, MI);
1368 bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1369 Register PReg = Op1.getReg();
1370 assert(Op1.getSubReg() == 0);
1371 unsigned PState = getRegState(Op1);
1372
1373 if (Op0.getReg() != Op2.getReg()) {
1374 unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1375 : PState;
1376 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1377 .add(Op0)
1378 .addReg(PReg, S)
1379 .add(Op2);
1380 if (IsDestLive)
1381 T.addReg(Op0.getReg(), RegState::Implicit);
1382 IsDestLive = true;
1383 }
1384 if (Op0.getReg() != Op3.getReg()) {
1385 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1386 .add(Op0)
1387 .addReg(PReg, PState)
1388 .add(Op3);
1389 if (IsDestLive)
1390 T.addReg(Op0.getReg(), RegState::Implicit);
1391 }
1392 MBB.erase(MI);
1393 return true;
1394 }
1395 case Hexagon::PS_wselect: {
1396 MachineOperand &Op0 = MI.getOperand(0);
1397 MachineOperand &Op1 = MI.getOperand(1);
1398 MachineOperand &Op2 = MI.getOperand(2);
1399 MachineOperand &Op3 = MI.getOperand(3);
1400 getLiveOutRegsAt(LiveOut, MI);
1401 bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1402 Register PReg = Op1.getReg();
1403 assert(Op1.getSubReg() == 0);
1404 unsigned PState = getRegState(Op1);
1405
1406 if (Op0.getReg() != Op2.getReg()) {
1407 unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1408 : PState;
1409 Register SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1410 Register SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1411 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1412 .add(Op0)
1413 .addReg(PReg, S)
1414 .addReg(SrcHi)
1415 .addReg(SrcLo);
1416 if (IsDestLive)
1417 T.addReg(Op0.getReg(), RegState::Implicit);
1418 IsDestLive = true;
1419 }
1420 if (Op0.getReg() != Op3.getReg()) {
1421 Register SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1422 Register SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1423 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1424 .add(Op0)
1425 .addReg(PReg, PState)
1426 .addReg(SrcHi)
1427 .addReg(SrcLo);
1428 if (IsDestLive)
1429 T.addReg(Op0.getReg(), RegState::Implicit);
1430 }
1431 MBB.erase(MI);
1432 return true;
1433 }
1434
1435 case Hexagon::PS_crash: {
1436 // Generate a misaligned load that is guaranteed to cause a crash.
1437 class CrashPseudoSourceValue : public PseudoSourceValue {
1438 public:
1439 CrashPseudoSourceValue(const TargetMachine &TM)
1440 : PseudoSourceValue(TargetCustom, TM) {}
1441
1442 bool isConstant(const MachineFrameInfo *) const override {
1443 return false;
1444 }
1445 bool isAliased(const MachineFrameInfo *) const override {
1446 return false;
1447 }
1448 bool mayAlias(const MachineFrameInfo *) const override {
1449 return false;
1450 }
1451 void printCustom(raw_ostream &OS) const override {
1452 OS << "MisalignedCrash";
1453 }
1454 };
1455
1456 static const CrashPseudoSourceValue CrashPSV(MF.getTarget());
1457 MachineMemOperand *MMO = MF.getMachineMemOperand(
1458 MachinePointerInfo(&CrashPSV),
1459 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8,
1460 Align(1));
1461 BuildMI(MBB, MI, DL, get(Hexagon::PS_loadrdabs), Hexagon::D13)
1462 .addImm(0xBADC0FEE) // Misaligned load.
1463 .addMemOperand(MMO);
1464 MBB.erase(MI);
1465 return true;
1466 }
1467
1468 case Hexagon::PS_tailcall_i:
1469 MI.setDesc(get(Hexagon::J2_jump));
1470 return true;
1471 case Hexagon::PS_tailcall_r:
1472 case Hexagon::PS_jmpret:
1473 MI.setDesc(get(Hexagon::J2_jumpr));
1474 return true;
1475 case Hexagon::PS_jmprett:
1476 MI.setDesc(get(Hexagon::J2_jumprt));
1477 return true;
1478 case Hexagon::PS_jmpretf:
1479 MI.setDesc(get(Hexagon::J2_jumprf));
1480 return true;
1481 case Hexagon::PS_jmprettnewpt:
1482 MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1483 return true;
1484 case Hexagon::PS_jmpretfnewpt:
1485 MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1486 return true;
1487 case Hexagon::PS_jmprettnew:
1488 MI.setDesc(get(Hexagon::J2_jumprtnew));
1489 return true;
1490 case Hexagon::PS_jmpretfnew:
1491 MI.setDesc(get(Hexagon::J2_jumprfnew));
1492 return true;
1493
1494 case Hexagon::PS_loadrub_pci:
1495 return RealCirc(Hexagon::L2_loadrub_pci, /*HasImm*/true, /*MxOp*/4);
1496 case Hexagon::PS_loadrb_pci:
1497 return RealCirc(Hexagon::L2_loadrb_pci, /*HasImm*/true, /*MxOp*/4);
1498 case Hexagon::PS_loadruh_pci:
1499 return RealCirc(Hexagon::L2_loadruh_pci, /*HasImm*/true, /*MxOp*/4);
1500 case Hexagon::PS_loadrh_pci:
1501 return RealCirc(Hexagon::L2_loadrh_pci, /*HasImm*/true, /*MxOp*/4);
1502 case Hexagon::PS_loadri_pci:
1503 return RealCirc(Hexagon::L2_loadri_pci, /*HasImm*/true, /*MxOp*/4);
1504 case Hexagon::PS_loadrd_pci:
1505 return RealCirc(Hexagon::L2_loadrd_pci, /*HasImm*/true, /*MxOp*/4);
1506 case Hexagon::PS_loadrub_pcr:
1507 return RealCirc(Hexagon::L2_loadrub_pcr, /*HasImm*/false, /*MxOp*/3);
1508 case Hexagon::PS_loadrb_pcr:
1509 return RealCirc(Hexagon::L2_loadrb_pcr, /*HasImm*/false, /*MxOp*/3);
1510 case Hexagon::PS_loadruh_pcr:
1511 return RealCirc(Hexagon::L2_loadruh_pcr, /*HasImm*/false, /*MxOp*/3);
1512 case Hexagon::PS_loadrh_pcr:
1513 return RealCirc(Hexagon::L2_loadrh_pcr, /*HasImm*/false, /*MxOp*/3);
1514 case Hexagon::PS_loadri_pcr:
1515 return RealCirc(Hexagon::L2_loadri_pcr, /*HasImm*/false, /*MxOp*/3);
1516 case Hexagon::PS_loadrd_pcr:
1517 return RealCirc(Hexagon::L2_loadrd_pcr, /*HasImm*/false, /*MxOp*/3);
1518 case Hexagon::PS_storerb_pci:
1519 return RealCirc(Hexagon::S2_storerb_pci, /*HasImm*/true, /*MxOp*/3);
1520 case Hexagon::PS_storerh_pci:
1521 return RealCirc(Hexagon::S2_storerh_pci, /*HasImm*/true, /*MxOp*/3);
1522 case Hexagon::PS_storerf_pci:
1523 return RealCirc(Hexagon::S2_storerf_pci, /*HasImm*/true, /*MxOp*/3);
1524 case Hexagon::PS_storeri_pci:
1525 return RealCirc(Hexagon::S2_storeri_pci, /*HasImm*/true, /*MxOp*/3);
1526 case Hexagon::PS_storerd_pci:
1527 return RealCirc(Hexagon::S2_storerd_pci, /*HasImm*/true, /*MxOp*/3);
1528 case Hexagon::PS_storerb_pcr:
1529 return RealCirc(Hexagon::S2_storerb_pcr, /*HasImm*/false, /*MxOp*/2);
1530 case Hexagon::PS_storerh_pcr:
1531 return RealCirc(Hexagon::S2_storerh_pcr, /*HasImm*/false, /*MxOp*/2);
1532 case Hexagon::PS_storerf_pcr:
1533 return RealCirc(Hexagon::S2_storerf_pcr, /*HasImm*/false, /*MxOp*/2);
1534 case Hexagon::PS_storeri_pcr:
1535 return RealCirc(Hexagon::S2_storeri_pcr, /*HasImm*/false, /*MxOp*/2);
1536 case Hexagon::PS_storerd_pcr:
1537 return RealCirc(Hexagon::S2_storerd_pcr, /*HasImm*/false, /*MxOp*/2);
1538 }
1539
1540 return false;
1541}
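A minimal standalone sketch (plain C++, not taken from the LLVM sources; the helper name is illustrative) of the lane semantics that the PS_vmulw expansion above implements: the 64-bit register pair is split into two 32-bit lanes and each lane is multiplied independently, mirroring the two M2_mpyi instructions issued on the isub_lo/isub_hi subregisters.

#include <cstdint>

static uint64_t vmulwLanes(uint64_t A, uint64_t B) {
  uint32_t LoA = uint32_t(A), HiA = uint32_t(A >> 32);
  uint32_t LoB = uint32_t(B), HiB = uint32_t(B >> 32);
  uint32_t Lo = uint32_t(uint64_t(LoA) * LoB);  // M2_mpyi on the isub_lo lane
  uint32_t Hi = uint32_t(uint64_t(HiA) * HiB);  // M2_mpyi on the isub_hi lane
  return (uint64_t(Hi) << 32) | Lo;
}

PS_vmulw_acc follows the same pattern, except each lane multiply also accumulates a third operand (M2_maci).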
1542
1543MachineBasicBlock::instr_iterator
1544HexagonInstrInfo::expandVGatherPseudo(MachineInstr &MI) const {
1545 MachineBasicBlock &MBB = *MI.getParent();
1546 const DebugLoc &DL = MI.getDebugLoc();
1547 unsigned Opc = MI.getOpcode();
1548 MachineBasicBlock::iterator First;
1549
1550 switch (Opc) {
1551 case Hexagon::V6_vgathermh_pseudo:
1552 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
1553 .add(MI.getOperand(2))
1554 .add(MI.getOperand(3))
1555 .add(MI.getOperand(4));
1556 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1557 .add(MI.getOperand(0))
1558 .addImm(MI.getOperand(1).getImm())
1559 .addReg(Hexagon::VTMP);
1560 MBB.erase(MI);
1561 return First.getInstrIterator();
1562
1563 case Hexagon::V6_vgathermw_pseudo:
1564 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
1565 .add(MI.getOperand(2))
1566 .add(MI.getOperand(3))
1567 .add(MI.getOperand(4));
1568 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1569 .add(MI.getOperand(0))
1570 .addImm(MI.getOperand(1).getImm())
1571 .addReg(Hexagon::VTMP);
1572 MBB.erase(MI);
1573 return First.getInstrIterator();
1574
1575 case Hexagon::V6_vgathermhw_pseudo:
1576 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
1577 .add(MI.getOperand(2))
1578 .add(MI.getOperand(3))
1579 .add(MI.getOperand(4));
1580 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1581 .add(MI.getOperand(0))
1582 .addImm(MI.getOperand(1).getImm())
1583 .addReg(Hexagon::VTMP);
1584 MBB.erase(MI);
1585 return First.getInstrIterator();
1586
1587 case Hexagon::V6_vgathermhq_pseudo:
1588 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
1589 .add(MI.getOperand(2))
1590 .add(MI.getOperand(3))
1591 .add(MI.getOperand(4))
1592 .add(MI.getOperand(5));
1593 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1594 .add(MI.getOperand(0))
1595 .addImm(MI.getOperand(1).getImm())
1596 .addReg(Hexagon::VTMP);
1597 MBB.erase(MI);
1598 return First.getInstrIterator();
1599
1600 case Hexagon::V6_vgathermwq_pseudo:
1601 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
1602 .add(MI.getOperand(2))
1603 .add(MI.getOperand(3))
1604 .add(MI.getOperand(4))
1605 .add(MI.getOperand(5));
1606 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1607 .add(MI.getOperand(0))
1608 .addImm(MI.getOperand(1).getImm())
1609 .addReg(Hexagon::VTMP);
1610 MBB.erase(MI);
1611 return First.getInstrIterator();
1612
1613 case Hexagon::V6_vgathermhwq_pseudo:
1614 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
1615 .add(MI.getOperand(2))
1616 .add(MI.getOperand(3))
1617 .add(MI.getOperand(4))
1618 .add(MI.getOperand(5));
1619 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1620 .add(MI.getOperand(0))
1621 .addImm(MI.getOperand(1).getImm())
1622 .addReg(Hexagon::VTMP);
1623 MBB.erase(MI);
1624 return First.getInstrIterator();
1625 }
1626
1627 return MI.getIterator();
1628}
1629
1630// We indicate that we want to reverse the branch by
1631// inserting the reversed branching opcode.
1632bool HexagonInstrInfo::reverseBranchCondition(
1633 SmallVectorImpl<MachineOperand> &Cond) const {
1634 if (Cond.empty())
1635 return true;
1636 assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1637 unsigned opcode = Cond[0].getImm();
1638 //unsigned temp;
1639 assert(get(opcode).isBranch() && "Should be a branching condition.");
1640 if (isEndLoopN(opcode))
1641 return true;
1642 unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1643 Cond[0].setImm(NewOpcode);
1644 return false;
1645}
1646
1647void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1648 MachineBasicBlock::iterator MI) const {
1649 DebugLoc DL;
1650 BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1651}
1652
1653bool HexagonInstrInfo::isPostIncrement(const MachineInstr &MI) const {
1654 return getAddrMode(MI) == HexagonII::PostInc;
1655}
1656
1657// Returns true if an instruction is predicated irrespective of the predicate
1658// sense. For example, all of the following will return true.
1659// if (p0) R1 = add(R2, R3)
1660// if (!p0) R1 = add(R2, R3)
1661// if (p0.new) R1 = add(R2, R3)
1662// if (!p0.new) R1 = add(R2, R3)
1663// Note: New-value stores are not included here as in the current
1664// implementation, we don't need to check their predicate sense.
1665bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
1666 const uint64_t F = MI.getDesc().TSFlags;
1667 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
1668}
1669
1670bool HexagonInstrInfo::PredicateInstruction(
1671 MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1672 if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1673 isEndLoopN(Cond[0].getImm())) {
1674 LLVM_DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1675 return false;
1676 }
1677 int Opc = MI.getOpcode();
1678 assert (isPredicable(MI) && "Expected predicable instruction");
1679 bool invertJump = predOpcodeHasNot(Cond);
1680
1681 // We have to predicate MI "in place", i.e. after this function returns,
1682 // MI will need to be transformed into a predicated form. To avoid com-
1683 // plicated manipulations with the operands (handling tied operands,
1684 // etc.), build a new temporary instruction, then overwrite MI with it.
1685
1686 MachineBasicBlock &B = *MI.getParent();
1687 DebugLoc DL = MI.getDebugLoc();
1688 unsigned PredOpc = getCondOpcode(Opc, invertJump);
1689 MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1690 unsigned NOp = 0, NumOps = MI.getNumOperands();
1691 while (NOp < NumOps) {
1692 MachineOperand &Op = MI.getOperand(NOp);
1693 if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1694 break;
1695 T.add(Op);
1696 NOp++;
1697 }
1698
1699 Register PredReg;
1700 unsigned PredRegPos, PredRegFlags;
1701 bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1702 (void)GotPredReg;
1703 assert(GotPredReg);
1704 T.addReg(PredReg, PredRegFlags);
1705 while (NOp < NumOps)
1706 T.add(MI.getOperand(NOp++));
1707
1708 MI.setDesc(get(PredOpc));
1709 while (unsigned n = MI.getNumOperands())
1710 MI.removeOperand(n-1);
1711 for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1712 MI.addOperand(T->getOperand(i));
1713
1714 MachineBasicBlock::instr_iterator TI = T->getIterator();
1715 B.erase(TI);
1716
1717 MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1718 MRI.clearKillFlags(PredReg);
1719 return true;
1720}
1721
1722bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1723 ArrayRef<MachineOperand> Pred2) const {
1724 // TODO: Fix this
1725 return false;
1726}
1727
1728bool HexagonInstrInfo::ClobbersPredicate(MachineInstr &MI,
1729 std::vector<MachineOperand> &Pred,
1730 bool SkipDead) const {
1731 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1732
1733 for (const MachineOperand &MO : MI.operands()) {
1734 if (MO.isReg()) {
1735 if (!MO.isDef())
1736 continue;
1737 const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1738 if (RC == &Hexagon::PredRegsRegClass) {
1739 Pred.push_back(MO);
1740 return true;
1741 }
1742 continue;
1743 } else if (MO.isRegMask()) {
1744 for (Register PR : Hexagon::PredRegsRegClass) {
1745 if (!MI.modifiesRegister(PR, &HRI))
1746 continue;
1747 Pred.push_back(MO);
1748 return true;
1749 }
1750 }
1751 }
1752 return false;
1753}
1754
1755bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
1756 if (!MI.getDesc().isPredicable())
1757 return false;
1758
1759 if (MI.isCall() || isTailCall(MI)) {
1760 if (!Subtarget.usePredicatedCalls())
1761 return false;
1762 }
1763
1764 // HVX loads are not predicable on v60, but are on v62.
1765 if (!Subtarget.hasV62Ops()) {
1766 switch (MI.getOpcode()) {
1767 case Hexagon::V6_vL32b_ai:
1768 case Hexagon::V6_vL32b_pi:
1769 case Hexagon::V6_vL32b_ppu:
1770 case Hexagon::V6_vL32b_cur_ai:
1771 case Hexagon::V6_vL32b_cur_pi:
1772 case Hexagon::V6_vL32b_cur_ppu:
1773 case Hexagon::V6_vL32b_nt_ai:
1774 case Hexagon::V6_vL32b_nt_pi:
1775 case Hexagon::V6_vL32b_nt_ppu:
1776 case Hexagon::V6_vL32b_tmp_ai:
1777 case Hexagon::V6_vL32b_tmp_pi:
1778 case Hexagon::V6_vL32b_tmp_ppu:
1779 case Hexagon::V6_vL32b_nt_cur_ai:
1780 case Hexagon::V6_vL32b_nt_cur_pi:
1781 case Hexagon::V6_vL32b_nt_cur_ppu:
1782 case Hexagon::V6_vL32b_nt_tmp_ai:
1783 case Hexagon::V6_vL32b_nt_tmp_pi:
1784 case Hexagon::V6_vL32b_nt_tmp_ppu:
1785 return false;
1786 }
1787 }
1788 return true;
1789}
1790
1791bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1792 const MachineBasicBlock *MBB,
1793 const MachineFunction &MF) const {
1794 // Debug info is never a scheduling boundary. It's necessary to be explicit
1795 // due to the special treatment of IT instructions below, otherwise a
1796 // dbg_value followed by an IT will result in the IT instruction being
1797 // considered a scheduling hazard, which is wrong. It should be the actual
1798 // instruction preceding the dbg_value instruction(s), just like it is
1799 // when debug info is not present.
1800 if (MI.isDebugInstr())
1801 return false;
1802
1803 // Throwing call is a boundary.
1804 if (MI.isCall()) {
1805 // Don't mess around with no return calls.
1806 if (doesNotReturn(MI))
1807 return true;
1808 // If any of the block's successors is a landing pad, this could be a
1809 // throwing call.
1810 for (auto *I : MBB->successors())
1811 if (I->isEHPad())
1812 return true;
1813 }
1814
1815 // Terminators and labels can't be scheduled around.
1816 if (MI.getDesc().isTerminator() || MI.isPosition())
1817 return true;
1818
1819 // INLINEASM_BR can jump to another block
1820 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1821 return true;
1822
1823 if (MI.isInlineAsm() && !ScheduleInlineAsm)
1824 return true;
1825
1826 return false;
1827}
1828
1829/// Measure the specified inline asm to determine an approximation of its
1830/// length.
1831/// Comments (which run till the next SeparatorString or newline) do not
1832/// count as an instruction.
1833/// Any other non-whitespace text is considered an instruction, with
1834/// multiple instructions separated by SeparatorString or newlines.
1835/// Variable-length instructions are not handled here; this function
1836/// may be overloaded in the target code to do that.
1837/// Hexagon counts the number of ##'s and adjusts for that many
1838/// constant extenders.
1839unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1840 const MCAsmInfo &MAI,
1841 const TargetSubtargetInfo *STI) const {
1842 StringRef AStr(Str);
1843 // Count the number of instructions in the asm.
1844 bool atInsnStart = true;
1845 unsigned Length = 0;
1846 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
1847 for (; *Str; ++Str) {
1848 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1849 strlen(MAI.getSeparatorString())) == 0)
1850 atInsnStart = true;
1851 if (atInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
1852 Length += MaxInstLength;
1853 atInsnStart = false;
1854 }
1855 if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1856 MAI.getCommentString().size()) == 0)
1857 atInsnStart = false;
1858 }
1859
1860 // Add 4 bytes to the size for each constant extender ("##") seen.
1861 StringRef Occ("##");
1862 Length += AStr.count(Occ)*4;
1863 return Length;
1864}
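A standalone sketch of the counting done above, under the assumptions that the packet separator string is ";" and that MaxInstLength is 4 bytes (both come from MCAsmInfo in LLVM and are target dependent); comment handling is omitted. Each instruction start costs MaxInstLength, and every "##" constant-extender marker adds another 4 bytes.

#include <cctype>
#include <string>

static unsigned approxInlineAsmLength(const std::string &Asm) {
  const unsigned MaxInstLength = 4;  // assumed; MCAsmInfo-provided in LLVM
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (char C : Asm) {
    if (C == '\n' || C == ';')
      AtInsnStart = true;
    else if (AtInsnStart && !std::isspace(static_cast<unsigned char>(C))) {
      Length += MaxInstLength;  // count one instruction
      AtInsnStart = false;
    }
  }
  // Each "##" marks a constant extender word (4 bytes).
  size_t Pos = 0, Extenders = 0;
  while ((Pos = Asm.find("##", Pos)) != std::string::npos) {
    ++Extenders;
    Pos += 2;
  }
  return Length + unsigned(Extenders) * 4;
}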
1865
1866ScheduleHazardRecognizer*
1867HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1868 const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1869 if (UseDFAHazardRec)
1870 return new HexagonHazardRecognizer(II, this, Subtarget);
1871 return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1872}
1873
1874/// For a comparison instruction, return the source registers in
1875/// \p SrcReg and \p SrcReg2 if it has two register operands, and the value it
1876/// compares against in CmpValue. Return true if the comparison instruction
1877/// can be analyzed.
1878bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1879 Register &SrcReg2, int64_t &Mask,
1880 int64_t &Value) const {
1881 unsigned Opc = MI.getOpcode();
1882
1883 // Set mask and the first source register.
1884 switch (Opc) {
1885 case Hexagon::C2_cmpeq:
1886 case Hexagon::C2_cmpeqp:
1887 case Hexagon::C2_cmpgt:
1888 case Hexagon::C2_cmpgtp:
1889 case Hexagon::C2_cmpgtu:
1890 case Hexagon::C2_cmpgtup:
1891 case Hexagon::C4_cmpneq:
1892 case Hexagon::C4_cmplte:
1893 case Hexagon::C4_cmplteu:
1894 case Hexagon::C2_cmpeqi:
1895 case Hexagon::C2_cmpgti:
1896 case Hexagon::C2_cmpgtui:
1897 case Hexagon::C4_cmpneqi:
1898 case Hexagon::C4_cmplteui:
1899 case Hexagon::C4_cmpltei:
1900 SrcReg = MI.getOperand(1).getReg();
1901 Mask = ~0;
1902 break;
1903 case Hexagon::A4_cmpbeq:
1904 case Hexagon::A4_cmpbgt:
1905 case Hexagon::A4_cmpbgtu:
1906 case Hexagon::A4_cmpbeqi:
1907 case Hexagon::A4_cmpbgti:
1908 case Hexagon::A4_cmpbgtui:
1909 SrcReg = MI.getOperand(1).getReg();
1910 Mask = 0xFF;
1911 break;
1912 case Hexagon::A4_cmpheq:
1913 case Hexagon::A4_cmphgt:
1914 case Hexagon::A4_cmphgtu:
1915 case Hexagon::A4_cmpheqi:
1916 case Hexagon::A4_cmphgti:
1917 case Hexagon::A4_cmphgtui:
1918 SrcReg = MI.getOperand(1).getReg();
1919 Mask = 0xFFFF;
1920 break;
1921 }
1922
1923 // Set the value/second source register.
1924 switch (Opc) {
1925 case Hexagon::C2_cmpeq:
1926 case Hexagon::C2_cmpeqp:
1927 case Hexagon::C2_cmpgt:
1928 case Hexagon::C2_cmpgtp:
1929 case Hexagon::C2_cmpgtu:
1930 case Hexagon::C2_cmpgtup:
1931 case Hexagon::A4_cmpbeq:
1932 case Hexagon::A4_cmpbgt:
1933 case Hexagon::A4_cmpbgtu:
1934 case Hexagon::A4_cmpheq:
1935 case Hexagon::A4_cmphgt:
1936 case Hexagon::A4_cmphgtu:
1937 case Hexagon::C4_cmpneq:
1938 case Hexagon::C4_cmplte:
1939 case Hexagon::C4_cmplteu:
1940 SrcReg2 = MI.getOperand(2).getReg();
1941 Value = 0;
1942 return true;
1943
1944 case Hexagon::C2_cmpeqi:
1945 case Hexagon::C2_cmpgtui:
1946 case Hexagon::C2_cmpgti:
1947 case Hexagon::C4_cmpneqi:
1948 case Hexagon::C4_cmplteui:
1949 case Hexagon::C4_cmpltei:
1950 case Hexagon::A4_cmpbeqi:
1951 case Hexagon::A4_cmpbgti:
1952 case Hexagon::A4_cmpbgtui:
1953 case Hexagon::A4_cmpheqi:
1954 case Hexagon::A4_cmphgti:
1955 case Hexagon::A4_cmphgtui: {
1956 SrcReg2 = 0;
1957 const MachineOperand &Op2 = MI.getOperand(2);
1958 if (!Op2.isImm())
1959 return false;
1960 Value = MI.getOperand(2).getImm();
1961 return true;
1962 }
1963 }
1964
1965 return false;
1966}
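A standalone illustration (not LLVM code) of what the Mask set by analyzeCompare above means: byte compares (A4_cmpb*) only observe the low 8 bits of the source register and half-word compares (A4_cmph*) only the low 16 bits, so a client of the analysis must apply Mask before reasoning about the comparison.

#include <cstdint>

static bool cmpbEq(uint32_t Rs, uint32_t Rt) {  // behaves like cmpb.eq
  return (Rs & 0xFF) == (Rt & 0xFF);
}

static bool cmphEq(uint32_t Rs, uint32_t Rt) {  // behaves like cmph.eq
  return (Rs & 0xFFFF) == (Rt & 0xFFFF);
}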
1967
1968unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1969 const MachineInstr &MI,
1970 unsigned *PredCost) const {
1971 return getInstrTimingClassLatency(ItinData, MI);
1972}
1973
1974DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1975 const TargetSubtargetInfo &STI) const {
1976 const InstrItineraryData *II = STI.getInstrItineraryData();
1977 return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1978}
1979
1980// Inspired by this pair:
1981// %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
1982// S2_storeri_io %r29, 132, killed %r1; flags: mem:ST4[FixedStack1]
1983// Currently AA considers the addresses in these instructions to be aliasing.
1984bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
1985 const MachineInstr &MIa, const MachineInstr &MIb) const {
1986 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1987 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1988 return false;
1989
1990 // Instructions that are pure loads (not loads-and-stores like memops) are
1991 // not dependent.
1992 if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1993 return true;
1994
1995 // Get the base register in MIa.
1996 unsigned BasePosA, OffsetPosA;
1997 if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA))
1998 return false;
1999 const MachineOperand &BaseA = MIa.getOperand(BasePosA);
2000 Register BaseRegA = BaseA.getReg();
2001 unsigned BaseSubA = BaseA.getSubReg();
2002
2003 // Get the base register in MIb.
2004 unsigned BasePosB, OffsetPosB;
2005 if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB))
2006 return false;
2007 const MachineOperand &BaseB = MIb.getOperand(BasePosB);
2008 Register BaseRegB = BaseB.getReg();
2009 unsigned BaseSubB = BaseB.getSubReg();
2010
2011 if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
2012 return false;
2013
2014 // Get the access sizes.
2015 unsigned SizeA = getMemAccessSize(MIa);
2016 unsigned SizeB = getMemAccessSize(MIb);
2017
2018 // Get the offsets. Handle immediates only for now.
2019 const MachineOperand &OffA = MIa.getOperand(OffsetPosA);
2020 const MachineOperand &OffB = MIb.getOperand(OffsetPosB);
2021 if (!MIa.getOperand(OffsetPosA).isImm() ||
2022 !MIb.getOperand(OffsetPosB).isImm())
2023 return false;
2024 int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();
2025 int OffsetB = isPostIncrement(MIb) ? 0 : OffB.getImm();
2026
2027 // This is a mem access with the same base register and known offsets from it.
2028 // Reason about it.
2029 if (OffsetA > OffsetB) {
2030 uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
2031 return SizeB <= OffDiff;
2032 }
2033 if (OffsetA < OffsetB) {
2034 uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
2035 return SizeA <= OffDiff;
2036 }
2037
2038 return false;
2039}
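A standalone sketch of the final overlap test above: with a common base register and immediate offsets, the accesses [OffsetA, OffsetA + SizeA) and [OffsetB, OffsetB + SizeB) are trivially disjoint exactly when the lower access ends at or before the higher one begins.

#include <cstdint>

static bool triviallyDisjoint(int64_t OffsetA, uint64_t SizeA,
                              int64_t OffsetB, uint64_t SizeB) {
  if (OffsetA > OffsetB)
    return SizeB <= uint64_t(OffsetA - OffsetB);  // B ends before A starts
  if (OffsetA < OffsetB)
    return SizeA <= uint64_t(OffsetB - OffsetA);  // A ends before B starts
  return false;  // Equal offsets: conservatively report a possible overlap.
}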
2040
2041/// If the instruction is an increment by a constant value, return the amount in \p Value.
2042bool HexagonInstrInfo::getIncrementValue(const MachineInstr &MI,
2043 int &Value) const {
2044 if (isPostIncrement(MI)) {
2045 unsigned BasePos = 0, OffsetPos = 0;
2046 if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
2047 return false;
2048 const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
2049 if (OffsetOp.isImm()) {
2050 Value = OffsetOp.getImm();
2051 return true;
2052 }
2053 } else if (MI.getOpcode() == Hexagon::A2_addi) {
2054 const MachineOperand &AddOp = MI.getOperand(2);
2055 if (AddOp.isImm()) {
2056 Value = AddOp.getImm();
2057 return true;
2058 }
2059 }
2060
2061 return false;
2062}
2063
2064std::pair<unsigned, unsigned>
2065HexagonInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
2066 return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
2067 TF & HexagonII::MO_Bitmasks);
2068}
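A standalone sketch of the decomposition above: the target-flags word is split into a "direct" part (a single enumerated value such as MO_PCREL) and a "bitmask" part (independently settable bits such as the const-extended marker). The mask constant below is an illustrative assumption, not the real value of HexagonII::MO_Bitmasks.

#include <utility>

static std::pair<unsigned, unsigned> splitTargetFlags(unsigned TF) {
  const unsigned BitmaskBits = 0xF000;  // assumed layout for illustration only
  return {TF & ~BitmaskBits, TF & BitmaskBits};
}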
2069
2070ArrayRef<std::pair<unsigned, const char*>>
2071HexagonInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
2072 using namespace HexagonII;
2073
2074 static const std::pair<unsigned, const char*> Flags[] = {
2075 {MO_PCREL, "hexagon-pcrel"},
2076 {MO_GOT, "hexagon-got"},
2077 {MO_LO16, "hexagon-lo16"},
2078 {MO_HI16, "hexagon-hi16"},
2079 {MO_GPREL, "hexagon-gprel"},
2080 {MO_GDGOT, "hexagon-gdgot"},
2081 {MO_GDPLT, "hexagon-gdplt"},
2082 {MO_IE, "hexagon-ie"},
2083 {MO_IEGOT, "hexagon-iegot"},
2084 {MO_TPREL, "hexagon-tprel"}
2085 };
2086 return ArrayRef(Flags);
2087}
2088
2089ArrayRef<std::pair<unsigned, const char*>>
2090HexagonInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
2091 using namespace HexagonII;
2092
2093 static const std::pair<unsigned, const char*> Flags[] = {
2094 {HMOTF_ConstExtended, "hexagon-ext"}
2095 };
2096 return ArrayRef(Flags);
2097}
2098
2099Register HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const {
2100 MachineRegisterInfo &MRI = MF->getRegInfo();
2101 const TargetRegisterClass *TRC;
2102 if (VT == MVT::i1) {
2103 TRC = &Hexagon::PredRegsRegClass;
2104 } else if (VT == MVT::i32 || VT == MVT::f32) {
2105 TRC = &Hexagon::IntRegsRegClass;
2106 } else if (VT == MVT::i64 || VT == MVT::f64) {
2107 TRC = &Hexagon::DoubleRegsRegClass;
2108 } else {
2109 llvm_unreachable("Cannot handle this register class");
2110 }
2111
2112 Register NewReg = MRI.createVirtualRegister(TRC);
2113 return NewReg;
2114}
2115
2116bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr &MI) const {
2117 return (getAddrMode(MI) == HexagonII::AbsoluteSet);
2118}
2119
2120bool HexagonInstrInfo::isAccumulator(const MachineInstr &MI) const {
2121 const uint64_t F = MI.getDesc().TSFlags;
2122 return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask);
2123}
2124
2125bool HexagonInstrInfo::isBaseImmOffset(const MachineInstr &MI) const {
2126 return getAddrMode(MI) == HexagonII::BaseImmOffset;
2127}
2128
2129bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
2130 return !isTC1(MI) && !isTC2Early(MI) && !MI.getDesc().mayLoad() &&
2131 !MI.getDesc().mayStore() &&
2132 MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
2133 MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
2134 !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
2135}
2136
2137// Return true if the instruction is a compound branch instruction.
2138bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
2139 return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
2140}
2141
2142// TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
2143// isFPImm and later getFPImm as well.
2144bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
2145 const uint64_t F = MI.getDesc().TSFlags;
2146 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
2147 if (isExtended) // Instruction must be extended.
2148 return true;
2149
2150 unsigned isExtendable =
2151 (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
2152 if (!isExtendable)
2153 return false;
2154
2155 if (MI.isCall())
2156 return false;
2157
2158 short ExtOpNum = getCExtOpNum(MI);
2159 const MachineOperand &MO = MI.getOperand(ExtOpNum);
2160 // Use MO operand flags to determine if MO
2161 // has the HMOTF_ConstExtended flag set.
2162 if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2163 return true;
2164 // If this is a Machine BB address we are talking about, and it is
2165 // not marked as extended, say so.
2166 if (MO.isMBB())
2167 return false;
2168
2169 // We could be using an instruction with an extendable immediate and shoehorn
2170 // a global address into it. If it is a global address it will be constant
2171 // extended. We do this for COMBINE.
2172 if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
2173 MO.isJTI() || MO.isCPI() || MO.isFPImm())
2174 return true;
2175
2176 // If the extendable operand is not 'Immediate' type, the instruction should
2177 // have 'isExtended' flag set.
2178 assert(MO.isImm() && "Extendable operand must be Immediate type");
2179
2180 int MinValue = getMinValue(MI);
2181 int MaxValue = getMaxValue(MI);
2182 int ImmValue = MO.getImm();
2183
2184 return (ImmValue < MinValue || ImmValue > MaxValue);
2185}
2186
2187bool HexagonInstrInfo::isDeallocRet(const MachineInstr &MI) const {
2188 switch (MI.getOpcode()) {
2189 case Hexagon::L4_return:
2190 case Hexagon::L4_return_t:
2191 case Hexagon::L4_return_f:
2192 case Hexagon::L4_return_tnew_pnt:
2193 case Hexagon::L4_return_fnew_pnt:
2194 case Hexagon::L4_return_tnew_pt:
2195 case Hexagon::L4_return_fnew_pt:
2196 return true;
2197 }
2198 return false;
2199}
2200
2201// Return true when ConsMI uses a register defined by ProdMI.
2202bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI,
2203 const MachineInstr &ConsMI) const {
2204 if (!ProdMI.getDesc().getNumDefs())
2205 return false;
2206 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
2207
2208 SmallVector<Register, 4> DefsA;
2209 SmallVector<Register, 4> DefsB;
2210 SmallVector<Register, 8> UsesA;
2211 SmallVector<Register, 8> UsesB;
2212
2213 parseOperands(ProdMI, DefsA, UsesA);
2214 parseOperands(ConsMI, DefsB, UsesB);
2215
2216 for (auto &RegA : DefsA)
2217 for (auto &RegB : UsesB) {
2218 // True data dependency.
2219 if (RegA == RegB)
2220 return true;
2221
2222 if (RegA.isPhysical())
2223 for (MCPhysReg SubReg : HRI.subregs(RegA))
2224 if (RegB == SubReg)
2225 return true;
2226
2227 if (RegB.isPhysical())
2228 for (MCPhysReg SubReg : HRI.subregs(RegB))
2229 if (RegA == SubReg)
2230 return true;
2231 }
2232
2233 return false;
2234}
2235
2236// Returns true if the instruction is already a .cur.
2237bool HexagonInstrInfo::isDotCurInst(const MachineInstr &MI) const {
2238 switch (MI.getOpcode()) {
2239 case Hexagon::V6_vL32b_cur_pi:
2240 case Hexagon::V6_vL32b_cur_ai:
2241 return true;
2242 }
2243 return false;
2244}
2245
2246// Returns true if any one of the operands is a dot-new
2247// insn, whether it is predicated dot-new or register dot-new.
2248bool HexagonInstrInfo::isDotNewInst(const MachineInstr &MI) const {
2249 if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
2250 return true;
2251
2252 return false;
2253}
2254
2255/// Symmetrical. See if these two instructions are fit for duplex pair.
2256bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
2257 const MachineInstr &MIb) const {
2258 HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
2259 HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
2260 return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
2261}
2262
2263bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
2264 return (Opcode == Hexagon::ENDLOOP0 ||
2265 Opcode == Hexagon::ENDLOOP1);
2266}
2267
2268bool HexagonInstrInfo::isExpr(unsigned OpType) const {
2269 switch(OpType) {
2270 case MachineOperand::MO_MachineBasicBlock:
2271 case MachineOperand::MO_GlobalAddress:
2272 case MachineOperand::MO_ExternalSymbol:
2273 case MachineOperand::MO_JumpTableIndex:
2274 case MachineOperand::MO_ConstantPoolIndex:
2275 case MachineOperand::MO_BlockAddress:
2276 return true;
2277 default:
2278 return false;
2279 }
2280}
2281
2282bool HexagonInstrInfo::isExtendable(const MachineInstr &MI) const {
2283 const MCInstrDesc &MID = MI.getDesc();
2284 const uint64_t F = MID.TSFlags;
2285 if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
2286 return true;
2287
2288 // TODO: This is largely obsolete now. Will need to be removed
2289 // in consecutive patches.
2290 switch (MI.getOpcode()) {
2291 // PS_fi and PS_fia remain special cases.
2292 case Hexagon::PS_fi:
2293 case Hexagon::PS_fia:
2294 return true;
2295 default:
2296 return false;
2297 }
2298 return false;
2299}
2300
2301// This returns true in two cases:
2302// - The OP code itself indicates that this is an extended instruction.
2303// - One of MOs has been marked with HMOTF_ConstExtended flag.
2304bool HexagonInstrInfo::isExtended(const MachineInstr &MI) const {
2305 // First check if this is permanently extended op code.
2306 const uint64_t F = MI.getDesc().TSFlags;
2307 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
2308 return true;
2309 // Use MO operand flags to determine if one of MI's operands
2310 // has HMOTF_ConstExtended flag set.
2311 for (const MachineOperand &MO : MI.operands())
2312 if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2313 return true;
2314 return false;
2315}
2316
2317bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
2318 unsigned Opcode = MI.getOpcode();
2319 const uint64_t F = get(Opcode).TSFlags;
2320 return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2321}
2322
2323// No V60 HVX VMEM with A_INDIRECT.
2324bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
2325 const MachineInstr &J) const {
2326 if (!isHVXVec(I))
2327 return false;
2328 if (!I.mayLoad() && !I.mayStore())
2329 return false;
2330 return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2331}
2332
2333bool HexagonInstrInfo::isIndirectCall(const MachineInstr &MI) const {
2334 switch (MI.getOpcode()) {
2335 case Hexagon::J2_callr:
2336 case Hexagon::J2_callrf:
2337 case Hexagon::J2_callrt:
2338 case Hexagon::PS_call_nr:
2339 return true;
2340 }
2341 return false;
2342}
2343
2344bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr &MI) const {
2345 switch (MI.getOpcode()) {
2346 case Hexagon::L4_return:
2347 case Hexagon::L4_return_t:
2348 case Hexagon::L4_return_f:
2349 case Hexagon::L4_return_fnew_pnt:
2350 case Hexagon::L4_return_fnew_pt:
2351 case Hexagon::L4_return_tnew_pnt:
2352 case Hexagon::L4_return_tnew_pt:
2353 return true;
2354 }
2355 return false;
2356}
2357
2358bool HexagonInstrInfo::isJumpR(const MachineInstr &MI) const {
2359 switch (MI.getOpcode()) {
2360 case Hexagon::J2_jumpr:
2361 case Hexagon::J2_jumprt:
2362 case Hexagon::J2_jumprf:
2363 case Hexagon::J2_jumprtnewpt:
2364 case Hexagon::J2_jumprfnewpt:
2365 case Hexagon::J2_jumprtnew:
2366 case Hexagon::J2_jumprfnew:
2367 return true;
2368 }
2369 return false;
2370}
2371
2372// Return true if a given MI can accommodate the given offset.
2373// Use an absolute estimate as opposed to the exact number.
2374// TODO: This will need to be changed to use MC level
2375// definition of instruction extendable field size.
2376bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr &MI,
2377 unsigned offset) const {
2378 // This selection of jump instructions matches what analyzeBranch can
2379 // parse, plus NVJ.
2380 if (isNewValueJump(MI)) // r9:2
2381 return isInt<11>(offset);
2382
2383 switch (MI.getOpcode()) {
2384 // Still missing Jump to address condition on register value.
2385 default:
2386 return false;
2387 case Hexagon::J2_jump: // bits<24> dst; // r22:2
2388 case Hexagon::J2_call:
2389 case Hexagon::PS_call_nr:
2390 return isInt<24>(offset);
2391 case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2392 case Hexagon::J2_jumpf:
2393 case Hexagon::J2_jumptnew:
2394 case Hexagon::J2_jumptnewpt:
2395 case Hexagon::J2_jumpfnew:
2396 case Hexagon::J2_jumpfnewpt:
2397 case Hexagon::J2_callt:
2398 case Hexagon::J2_callf:
2399 return isInt<17>(offset);
2400 case Hexagon::J2_loop0i:
2401 case Hexagon::J2_loop0iext:
2402 case Hexagon::J2_loop0r:
2403 case Hexagon::J2_loop0rext:
2404 case Hexagon::J2_loop1i:
2405 case Hexagon::J2_loop1iext:
2406 case Hexagon::J2_loop1r:
2407 case Hexagon::J2_loop1rext:
2408 return isInt<9>(offset);
2409 // TODO: Add all the compound branches here. Can we do this in Relation model?
2410 case Hexagon::J4_cmpeqi_tp0_jump_nt:
2411 case Hexagon::J4_cmpeqi_tp1_jump_nt:
2412 case Hexagon::J4_cmpeqn1_tp0_jump_nt:
2413 case Hexagon::J4_cmpeqn1_tp1_jump_nt:
2414 return isInt<11>(offset);
2415 }
2416}
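A standalone sketch of the signed-range checks used above (the isInt<N> calls): a jump can accommodate its target only if the byte offset fits in the instruction's signed N-bit displacement field, e.g. 24 bits for J2_jump and 17 bits for conditional jumps such as J2_jumpt. The helper below is illustrative, not LLVM's isInt implementation.

#include <cstdint>

template <unsigned N> static bool fitsSignedBits(int64_t Offset) {
  return Offset >= -(int64_t(1) << (N - 1)) &&
         Offset <   (int64_t(1) << (N - 1));
}

static bool jumpReaches(int64_t Offset)  { return fitsSignedBits<24>(Offset); }
static bool jumptReaches(int64_t Offset) { return fitsSignedBits<17>(Offset); }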
2417
2418bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
2419 // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
2420 // resource, but all operands can be received late like an ALU instruction.
2421 return getType(MI) == HexagonII::TypeCVI_VX_LATE;
2422}
2423
2424bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
2425 unsigned Opcode = MI.getOpcode();
2426 return Opcode == Hexagon::J2_loop0i ||
2427 Opcode == Hexagon::J2_loop0r ||
2428 Opcode == Hexagon::J2_loop0iext ||
2429 Opcode == Hexagon::J2_loop0rext ||
2430 Opcode == Hexagon::J2_loop1i ||
2431 Opcode == Hexagon::J2_loop1r ||
2432 Opcode == Hexagon::J2_loop1iext ||
2433 Opcode == Hexagon::J2_loop1rext;
2434}
2435
2436bool HexagonInstrInfo::isMemOp(const MachineInstr &MI) const {
2437 switch (MI.getOpcode()) {
2438 default: return false;
2439 case Hexagon::L4_iadd_memopw_io:
2440 case Hexagon::L4_isub_memopw_io:
2441 case Hexagon::L4_add_memopw_io:
2442 case Hexagon::L4_sub_memopw_io:
2443 case Hexagon::L4_and_memopw_io:
2444 case Hexagon::L4_or_memopw_io:
2445 case Hexagon::L4_iadd_memoph_io:
2446 case Hexagon::L4_isub_memoph_io:
2447 case Hexagon::L4_add_memoph_io:
2448 case Hexagon::L4_sub_memoph_io:
2449 case Hexagon::L4_and_memoph_io:
2450 case Hexagon::L4_or_memoph_io:
2451 case Hexagon::L4_iadd_memopb_io:
2452 case Hexagon::L4_isub_memopb_io:
2453 case Hexagon::L4_add_memopb_io:
2454 case Hexagon::L4_sub_memopb_io:
2455 case Hexagon::L4_and_memopb_io:
2456 case Hexagon::L4_or_memopb_io:
2457 case Hexagon::L4_ior_memopb_io:
2458 case Hexagon::L4_ior_memoph_io:
2459 case Hexagon::L4_ior_memopw_io:
2460 case Hexagon::L4_iand_memopb_io:
2461 case Hexagon::L4_iand_memoph_io:
2462 case Hexagon::L4_iand_memopw_io:
2463 return true;
2464 }
2465 return false;
2466}
2467
2468bool HexagonInstrInfo::isNewValue(const MachineInstr &MI) const {
2469 const uint64_t F = MI.getDesc().TSFlags;
2470 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2471}
2472
2473bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2474 const uint64_t F = get(Opcode).TSFlags;
2475 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2476}
2477
2478bool HexagonInstrInfo::isNewValueInst(const MachineInstr &MI) const {
2479 return isNewValueJump(MI) || isNewValueStore(MI);
2480}
2481
2482bool HexagonInstrInfo::isNewValueJump(const MachineInstr &MI) const {
2483 return isNewValue(MI) && MI.isBranch();
2484}
2485
2486bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2487 return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2488}
2489
2490bool HexagonInstrInfo::isNewValueStore(const MachineInstr &MI) const {
2491 const uint64_t F = MI.getDesc().TSFlags;
2492 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2493}
2494
2495bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2496 const uint64_t F = get(Opcode).TSFlags;
2497 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2498}
2499
2500// Returns true if a particular operand is extendable for an instruction.
2501bool HexagonInstrInfo::isOperandExtended(const MachineInstr &MI,
2502 unsigned OperandNum) const {
2503 const uint64_t F = MI.getDesc().TSFlags;
2504 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2505 == OperandNum;
2506}
2507
2508bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
2509 const uint64_t F = MI.getDesc().TSFlags;
2510 assert(isPredicated(MI));
2511 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2512}
2513
2514bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2515 const uint64_t F = get(Opcode).TSFlags;
2516 assert(isPredicated(Opcode));
2517 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2518}
2519
2520bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
2521 const uint64_t F = MI.getDesc().TSFlags;
2522 return !((F >> HexagonII::PredicatedFalsePos) &
2523 HexagonII::PredicatedFalseMask);
2524}
2525
2526bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2527 const uint64_t F = get(Opcode).TSFlags;
2528 // Make sure that the instruction is predicated.
2529 assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
2530 return !((F >> HexagonII::PredicatedFalsePos) &
2531 HexagonII::PredicatedFalseMask);
2532}
2533
2534bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2535 const uint64_t F = get(Opcode).TSFlags;
2536 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
2537}
2538
2539bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2540 const uint64_t F = get(Opcode).TSFlags;
2541 return (F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
2542}
2543
2544bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2545 const uint64_t F = get(Opcode).TSFlags;
2546 assert(get(Opcode).isBranch() &&
2547 (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2548 return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2549}
2550
2551bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr &MI) const {
2552 return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2553 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2554 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2555 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2556}
2557
2558bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
2559 switch (MI.getOpcode()) {
2560 // Byte
2561 case Hexagon::L2_loadrb_io:
2562 case Hexagon::L4_loadrb_ur:
2563 case Hexagon::L4_loadrb_ap:
2564 case Hexagon::L2_loadrb_pr:
2565 case Hexagon::L2_loadrb_pbr:
2566 case Hexagon::L2_loadrb_pi:
2567 case Hexagon::L2_loadrb_pci:
2568 case Hexagon::L2_loadrb_pcr:
2569 case Hexagon::L2_loadbsw2_io:
2570 case Hexagon::L4_loadbsw2_ur:
2571 case Hexagon::L4_loadbsw2_ap:
2572 case Hexagon::L2_loadbsw2_pr:
2573 case Hexagon::L2_loadbsw2_pbr:
2574 case Hexagon::L2_loadbsw2_pi:
2575 case Hexagon::L2_loadbsw2_pci:
2576 case Hexagon::L2_loadbsw2_pcr:
2577 case Hexagon::L2_loadbsw4_io:
2578 case Hexagon::L4_loadbsw4_ur:
2579 case Hexagon::L4_loadbsw4_ap:
2580 case Hexagon::L2_loadbsw4_pr:
2581 case Hexagon::L2_loadbsw4_pbr:
2582 case Hexagon::L2_loadbsw4_pi:
2583 case Hexagon::L2_loadbsw4_pci:
2584 case Hexagon::L2_loadbsw4_pcr:
2585 case Hexagon::L4_loadrb_rr:
2586 case Hexagon::L2_ploadrbt_io:
2587 case Hexagon::L2_ploadrbt_pi:
2588 case Hexagon::L2_ploadrbf_io:
2589 case Hexagon::L2_ploadrbf_pi:
2590 case Hexagon::L2_ploadrbtnew_io:
2591 case Hexagon::L2_ploadrbfnew_io:
2592 case Hexagon::L4_ploadrbt_rr:
2593 case Hexagon::L4_ploadrbf_rr:
2594 case Hexagon::L4_ploadrbtnew_rr:
2595 case Hexagon::L4_ploadrbfnew_rr:
2596 case Hexagon::L2_ploadrbtnew_pi:
2597 case Hexagon::L2_ploadrbfnew_pi:
2598 case Hexagon::L4_ploadrbt_abs:
2599 case Hexagon::L4_ploadrbf_abs:
2600 case Hexagon::L4_ploadrbtnew_abs:
2601 case Hexagon::L4_ploadrbfnew_abs:
2602 case Hexagon::L2_loadrbgp:
2603 // Half
2604 case Hexagon::L2_loadrh_io:
2605 case Hexagon::L4_loadrh_ur:
2606 case Hexagon::L4_loadrh_ap:
2607 case Hexagon::L2_loadrh_pr:
2608 case Hexagon::L2_loadrh_pbr:
2609 case Hexagon::L2_loadrh_pi:
2610 case Hexagon::L2_loadrh_pci:
2611 case Hexagon::L2_loadrh_pcr:
2612 case Hexagon::L4_loadrh_rr:
2613 case Hexagon::L2_ploadrht_io:
2614 case Hexagon::L2_ploadrht_pi:
2615 case Hexagon::L2_ploadrhf_io:
2616 case Hexagon::L2_ploadrhf_pi:
2617 case Hexagon::L2_ploadrhtnew_io:
2618 case Hexagon::L2_ploadrhfnew_io:
2619 case Hexagon::L4_ploadrht_rr:
2620 case Hexagon::L4_ploadrhf_rr:
2621 case Hexagon::L4_ploadrhtnew_rr:
2622 case Hexagon::L4_ploadrhfnew_rr:
2623 case Hexagon::L2_ploadrhtnew_pi:
2624 case Hexagon::L2_ploadrhfnew_pi:
2625 case Hexagon::L4_ploadrht_abs:
2626 case Hexagon::L4_ploadrhf_abs:
2627 case Hexagon::L4_ploadrhtnew_abs:
2628 case Hexagon::L4_ploadrhfnew_abs:
2629 case Hexagon::L2_loadrhgp:
2630 return true;
2631 default:
2632 return false;
2633 }
2634}
2635
2636bool HexagonInstrInfo::isSolo(const MachineInstr &MI) const {
2637 const uint64_t F = MI.getDesc().TSFlags;
2638 return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2639}
2640
2641bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr &MI) const {
2642 switch (MI.getOpcode()) {
2643 case Hexagon::STriw_pred:
2644 case Hexagon::LDriw_pred:
2645 return true;
2646 default:
2647 return false;
2648 }
2649}
2650
2651bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
2652 if (!MI.isBranch())
2653 return false;
2654
2655 for (auto &Op : MI.operands())
2656 if (Op.isGlobal() || Op.isSymbol())
2657 return true;
2658 return false;
2659}
2660
2661// Returns true when SU has a timing class TC1.
2662bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
2663 unsigned SchedClass = MI.getDesc().getSchedClass();
2664 return is_TC1(SchedClass);
2665}
2666
2667bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
2668 unsigned SchedClass = MI.getDesc().getSchedClass();
2669 return is_TC2(SchedClass);
2670}
2671
2672bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
2673 unsigned SchedClass = MI.getDesc().getSchedClass();
2674 return is_TC2early(SchedClass);
2675}
2676
2677bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
2678 unsigned SchedClass = MI.getDesc().getSchedClass();
2679 return is_TC4x(SchedClass);
2680}
2681
2682// Schedule this ASAP.
2683bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
2684 const MachineInstr &MI2) const {
2685 if (mayBeCurLoad(MI1)) {
2686 // if (result of SU is used in Next) return true;
2687 Register DstReg = MI1.getOperand(0).getReg();
2688 int N = MI2.getNumOperands();
2689 for (int I = 0; I < N; I++)
2690 if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2691 return true;
2692 }
2693 if (mayBeNewStore(MI2))
2694 if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2695 if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2696 MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2697 return true;
2698 return false;
2699}
2700
2701bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
2702 const uint64_t V = getType(MI);
2703 return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
2704}
2705
2706// Check if the Offset is a valid auto-inc imm for the load/store type.
2707bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, int Offset) const {
2708 int Size = VT.getSizeInBits() / 8;
2709 if (Offset % Size != 0)
2710 return false;
2711 int Count = Offset / Size;
2712
2713 switch (VT.getSimpleVT().SimpleTy) {
2714 // For scalars the auto-inc is s4
2715 case MVT::i8:
2716 case MVT::i16:
2717 case MVT::i32:
2718 case MVT::i64:
2719 case MVT::f32:
2720 case MVT::f64:
2721 case MVT::v2i16:
2722 case MVT::v2i32:
2723 case MVT::v4i8:
2724 case MVT::v4i16:
2725 case MVT::v8i8:
2726 return isInt<4>(Count);
2727 // For HVX vectors the auto-inc is s3
2728 case MVT::v64i8:
2729 case MVT::v32i16:
2730 case MVT::v16i32:
2731 case MVT::v8i64:
2732 case MVT::v128i8:
2733 case MVT::v64i16:
2734 case MVT::v32i32:
2735 case MVT::v16i64:
2736 return isInt<3>(Count);
2737 default:
2738 break;
2739 }
2740
2741  llvm_unreachable("Not an valid type!");
2742}
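// Worked examples for the ranges above (a sketch; vector sizes follow the MVT
// list in the switch):
//   isValidAutoIncImm(MVT::i32, 28)     -> true   (28/4 == 7, fits s4)
//   isValidAutoIncImm(MVT::i32, 36)     -> false  (36/4 == 9, outside s4)
//   isValidAutoIncImm(MVT::v16i32, 128) -> true   (128/64 == 2, fits s3)
//   isValidAutoIncImm(MVT::v16i32, 96)  -> false  (not a multiple of 64)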
2743
2744bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2745 const TargetRegisterInfo *TRI, bool Extend) const {
2746  // This function checks whether "Offset" is in the valid range for the
2747  // given "Opcode". If it is not, an "A2_addi" is inserted to compute the
2748  // final address, so the function assumes that "Offset" is correctly
2749  // aligned.
2750  // We used to assert if the offset was not properly aligned; however,
2751  // there are cases where a misaligned pointer recast can cause this
2752  // problem, and we need to allow for it. The front end warns about such
2753  // misalignments with respect to the load size.
2754 switch (Opcode) {
2755 case Hexagon::PS_vstorerq_ai:
2756 case Hexagon::PS_vstorerv_ai:
2757 case Hexagon::PS_vstorerw_ai:
2758 case Hexagon::PS_vstorerw_nt_ai:
2759 case Hexagon::PS_vloadrq_ai:
2760 case Hexagon::PS_vloadrv_ai:
2761 case Hexagon::PS_vloadrw_ai:
2762 case Hexagon::PS_vloadrw_nt_ai:
2763 case Hexagon::V6_vL32b_ai:
2764 case Hexagon::V6_vS32b_ai:
2765 case Hexagon::V6_vS32b_qpred_ai:
2766 case Hexagon::V6_vS32b_nqpred_ai:
2767 case Hexagon::V6_vL32b_nt_ai:
2768 case Hexagon::V6_vS32b_nt_ai:
2769 case Hexagon::V6_vL32Ub_ai:
2770 case Hexagon::V6_vS32Ub_ai:
2771 case Hexagon::V6_vgathermh_pseudo:
2772 case Hexagon::V6_vgathermw_pseudo:
2773 case Hexagon::V6_vgathermhw_pseudo:
2774 case Hexagon::V6_vgathermhq_pseudo:
2775 case Hexagon::V6_vgathermwq_pseudo:
2776 case Hexagon::V6_vgathermhwq_pseudo: {
2777 unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
2778    assert(isPowerOf2_32(VectorSize));
2779 if (Offset & (VectorSize-1))
2780 return false;
2781 return isInt<4>(Offset >> Log2_32(VectorSize));
2782 }
2783
2784 case Hexagon::J2_loop0i:
2785 case Hexagon::J2_loop1i:
2786 return isUInt<10>(Offset);
2787
2788 case Hexagon::S4_storeirb_io:
2789 case Hexagon::S4_storeirbt_io:
2790 case Hexagon::S4_storeirbf_io:
2791 return isUInt<6>(Offset);
2792
2793 case Hexagon::S4_storeirh_io:
2794 case Hexagon::S4_storeirht_io:
2795 case Hexagon::S4_storeirhf_io:
2796 return isShiftedUInt<6,1>(Offset);
2797
2798 case Hexagon::S4_storeiri_io:
2799 case Hexagon::S4_storeirit_io:
2800 case Hexagon::S4_storeirif_io:
2801 return isShiftedUInt<6,2>(Offset);
2802 // Handle these two compare instructions that are not extendable.
2803 case Hexagon::A4_cmpbeqi:
2804 return isUInt<8>(Offset);
2805 case Hexagon::A4_cmpbgti:
2806 return isInt<8>(Offset);
2807 }
2808
2809 if (Extend)
2810 return true;
2811
2812 switch (Opcode) {
2813 case Hexagon::L2_loadri_io:
2814 case Hexagon::S2_storeri_io:
2815 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2816 (Offset <= Hexagon_MEMW_OFFSET_MAX);
2817
2818 case Hexagon::L2_loadrd_io:
2819 case Hexagon::S2_storerd_io:
2820 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2821 (Offset <= Hexagon_MEMD_OFFSET_MAX);
2822
2823 case Hexagon::L2_loadrh_io:
2824 case Hexagon::L2_loadruh_io:
2825 case Hexagon::S2_storerh_io:
2826 case Hexagon::S2_storerf_io:
2827 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2828 (Offset <= Hexagon_MEMH_OFFSET_MAX);
2829
2830 case Hexagon::L2_loadrb_io:
2831 case Hexagon::L2_loadrub_io:
2832 case Hexagon::S2_storerb_io:
2833 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2834 (Offset <= Hexagon_MEMB_OFFSET_MAX);
2835
2836 case Hexagon::A2_addi:
2837 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2838 (Offset <= Hexagon_ADDI_OFFSET_MAX);
2839
2840 case Hexagon::L4_iadd_memopw_io:
2841 case Hexagon::L4_isub_memopw_io:
2842 case Hexagon::L4_add_memopw_io:
2843 case Hexagon::L4_sub_memopw_io:
2844 case Hexagon::L4_iand_memopw_io:
2845 case Hexagon::L4_ior_memopw_io:
2846 case Hexagon::L4_and_memopw_io:
2847 case Hexagon::L4_or_memopw_io:
2848 return (0 <= Offset && Offset <= 255);
2849
2850 case Hexagon::L4_iadd_memoph_io:
2851 case Hexagon::L4_isub_memoph_io:
2852 case Hexagon::L4_add_memoph_io:
2853 case Hexagon::L4_sub_memoph_io:
2854 case Hexagon::L4_iand_memoph_io:
2855 case Hexagon::L4_ior_memoph_io:
2856 case Hexagon::L4_and_memoph_io:
2857 case Hexagon::L4_or_memoph_io:
2858 return (0 <= Offset && Offset <= 127);
2859
2860 case Hexagon::L4_iadd_memopb_io:
2861 case Hexagon::L4_isub_memopb_io:
2862 case Hexagon::L4_add_memopb_io:
2863 case Hexagon::L4_sub_memopb_io:
2864 case Hexagon::L4_iand_memopb_io:
2865 case Hexagon::L4_ior_memopb_io:
2866 case Hexagon::L4_and_memopb_io:
2867 case Hexagon::L4_or_memopb_io:
2868 return (0 <= Offset && Offset <= 63);
2869
2870  // LDriw_xxx and STriw_xxx are pseudo operations, so they have to accept an
2871  // offset of any size. A later pass knows how to handle it.
2872 case Hexagon::STriw_pred:
2873 case Hexagon::LDriw_pred:
2874 case Hexagon::STriw_ctr:
2875 case Hexagon::LDriw_ctr:
2876 return true;
2877
2878 case Hexagon::PS_fi:
2879 case Hexagon::PS_fia:
2880 case Hexagon::INLINEASM:
2881 return true;
2882
2883 case Hexagon::L2_ploadrbt_io:
2884 case Hexagon::L2_ploadrbf_io:
2885 case Hexagon::L2_ploadrubt_io:
2886 case Hexagon::L2_ploadrubf_io:
2887 case Hexagon::S2_pstorerbt_io:
2888 case Hexagon::S2_pstorerbf_io:
2889 return isUInt<6>(Offset);
2890
2891 case Hexagon::L2_ploadrht_io:
2892 case Hexagon::L2_ploadrhf_io:
2893 case Hexagon::L2_ploadruht_io:
2894 case Hexagon::L2_ploadruhf_io:
2895 case Hexagon::S2_pstorerht_io:
2896 case Hexagon::S2_pstorerhf_io:
2897 return isShiftedUInt<6,1>(Offset);
2898
2899 case Hexagon::L2_ploadrit_io:
2900 case Hexagon::L2_ploadrif_io:
2901 case Hexagon::S2_pstorerit_io:
2902 case Hexagon::S2_pstorerif_io:
2903 return isShiftedUInt<6,2>(Offset);
2904
2905 case Hexagon::L2_ploadrdt_io:
2906 case Hexagon::L2_ploadrdf_io:
2907 case Hexagon::S2_pstorerdt_io:
2908 case Hexagon::S2_pstorerdf_io:
2909 return isShiftedUInt<6,3>(Offset);
2910
2911 case Hexagon::L2_loadbsw2_io:
2912 case Hexagon::L2_loadbzw2_io:
2913 return isShiftedInt<11,1>(Offset);
2914
2915 case Hexagon::L2_loadbsw4_io:
2916 case Hexagon::L2_loadbzw4_io:
2917 return isShiftedInt<11,2>(Offset);
2918 } // switch
2919
2920 dbgs() << "Failed Opcode is : " << Opcode << " (" << getName(Opcode)
2921 << ")\n";
2922  llvm_unreachable("No offset range is defined for this opcode. "
2923                   "Please define it in the above switch statement!");
2924}
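// A few concrete checks implied by the ranges above (a sketch, not exhaustive):
//   S4_storeirh_io: isShiftedUInt<6,1> accepts even offsets 0..126, so an
//     offset of 64 is valid while 65 (odd) and 128 (too large) are not.
//   L4_add_memopw_io: any offset in [0, 255] is valid.
//   V6_vS32b_ai: assuming a 128-byte HVX spill size, the offset must be a
//     multiple of 128 with Offset/128 in [-8, 7], i.e. -1024..896.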
2925
2926bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
2927 return isHVXVec(MI) && isAccumulator(MI);
2928}
2929
2930bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
2931 const uint64_t F = get(MI.getOpcode()).TSFlags;
2932 const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2933 return
2934 V == HexagonII::TypeCVI_VA ||
2935 V == HexagonII::TypeCVI_VA_DV;
2936}
2937
2938bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr &ProdMI,
2939 const MachineInstr &ConsMI) const {
2940 if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2941 return true;
2942
2943 if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2944 return true;
2945
2946 if (mayBeNewStore(ConsMI))
2947 return true;
2948
2949 return false;
2950}
2951
2952bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
2953 switch (MI.getOpcode()) {
2954 // Byte
2955 case Hexagon::L2_loadrub_io:
2956 case Hexagon::L4_loadrub_ur:
2957 case Hexagon::L4_loadrub_ap:
2958 case Hexagon::L2_loadrub_pr:
2959 case Hexagon::L2_loadrub_pbr:
2960 case Hexagon::L2_loadrub_pi:
2961 case Hexagon::L2_loadrub_pci:
2962 case Hexagon::L2_loadrub_pcr:
2963 case Hexagon::L2_loadbzw2_io:
2964 case Hexagon::L4_loadbzw2_ur:
2965 case Hexagon::L4_loadbzw2_ap:
2966 case Hexagon::L2_loadbzw2_pr:
2967 case Hexagon::L2_loadbzw2_pbr:
2968 case Hexagon::L2_loadbzw2_pi:
2969 case Hexagon::L2_loadbzw2_pci:
2970 case Hexagon::L2_loadbzw2_pcr:
2971 case Hexagon::L2_loadbzw4_io:
2972 case Hexagon::L4_loadbzw4_ur:
2973 case Hexagon::L4_loadbzw4_ap:
2974 case Hexagon::L2_loadbzw4_pr:
2975 case Hexagon::L2_loadbzw4_pbr:
2976 case Hexagon::L2_loadbzw4_pi:
2977 case Hexagon::L2_loadbzw4_pci:
2978 case Hexagon::L2_loadbzw4_pcr:
2979 case Hexagon::L4_loadrub_rr:
2980 case Hexagon::L2_ploadrubt_io:
2981 case Hexagon::L2_ploadrubt_pi:
2982 case Hexagon::L2_ploadrubf_io:
2983 case Hexagon::L2_ploadrubf_pi:
2984 case Hexagon::L2_ploadrubtnew_io:
2985 case Hexagon::L2_ploadrubfnew_io:
2986 case Hexagon::L4_ploadrubt_rr:
2987 case Hexagon::L4_ploadrubf_rr:
2988 case Hexagon::L4_ploadrubtnew_rr:
2989 case Hexagon::L4_ploadrubfnew_rr:
2990 case Hexagon::L2_ploadrubtnew_pi:
2991 case Hexagon::L2_ploadrubfnew_pi:
2992 case Hexagon::L4_ploadrubt_abs:
2993 case Hexagon::L4_ploadrubf_abs:
2994 case Hexagon::L4_ploadrubtnew_abs:
2995 case Hexagon::L4_ploadrubfnew_abs:
2996 case Hexagon::L2_loadrubgp:
2997 // Half
2998 case Hexagon::L2_loadruh_io:
2999 case Hexagon::L4_loadruh_ur:
3000 case Hexagon::L4_loadruh_ap:
3001 case Hexagon::L2_loadruh_pr:
3002 case Hexagon::L2_loadruh_pbr:
3003 case Hexagon::L2_loadruh_pi:
3004 case Hexagon::L2_loadruh_pci:
3005 case Hexagon::L2_loadruh_pcr:
3006 case Hexagon::L4_loadruh_rr:
3007 case Hexagon::L2_ploadruht_io:
3008 case Hexagon::L2_ploadruht_pi:
3009 case Hexagon::L2_ploadruhf_io:
3010 case Hexagon::L2_ploadruhf_pi:
3011 case Hexagon::L2_ploadruhtnew_io:
3012 case Hexagon::L2_ploadruhfnew_io:
3013 case Hexagon::L4_ploadruht_rr:
3014 case Hexagon::L4_ploadruhf_rr:
3015 case Hexagon::L4_ploadruhtnew_rr:
3016 case Hexagon::L4_ploadruhfnew_rr:
3017 case Hexagon::L2_ploadruhtnew_pi:
3018 case Hexagon::L2_ploadruhfnew_pi:
3019 case Hexagon::L4_ploadruht_abs:
3020 case Hexagon::L4_ploadruhf_abs:
3021 case Hexagon::L4_ploadruhtnew_abs:
3022 case Hexagon::L4_ploadruhfnew_abs:
3023 case Hexagon::L2_loadruhgp:
3024 return true;
3025 default:
3026 return false;
3027 }
3028}
3029
3030// Returns true if extra latency should be added between MI1 and MI2.
3031bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
3032 const MachineInstr &MI2) const {
3033 if (isHVXVec(MI1) && isHVXVec(MI2))
3034 if (!isVecUsableNextPacket(MI1, MI2))
3035 return true;
3036 return false;
3037}
3038
3039/// Get the base register and byte offset of a load/store instr.
3040bool HexagonInstrInfo::getMemOperandsWithOffsetWidth(
3041 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
3042 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3043 const TargetRegisterInfo *TRI) const {
3044 OffsetIsScalable = false;
3045 const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width);
3046 if (!BaseOp || !BaseOp->isReg())
3047 return false;
3048 BaseOps.push_back(BaseOp);
3049 return true;
3050}
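// A minimal caller sketch (hypothetical use mirroring the signature above):
//   SmallVector<const MachineOperand *, 1> BaseOps;
//   int64_t Offset; bool Scalable; unsigned Width;
//   if (HII->getMemOperandsWithOffsetWidth(LdSt, BaseOps, Offset, Scalable,
//                                          Width, TRI))
//     ... BaseOps[0] is the base-register operand, Offset is in bytes, and
//         Scalable is always false on Hexagon ...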
3051
3052/// Can these instructions execute at the same time in a bundle.
3053bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr &First,
3054 const MachineInstr &Second) const {
3055 if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
3056 const MachineOperand &Op = Second.getOperand(0);
3057 if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
3058 return true;
3059 }
3060 if (DisableNVSchedule)
3061 return false;
3062 if (mayBeNewStore(Second)) {
3063 // Make sure the definition of the first instruction is the value being
3064 // stored.
3065 const MachineOperand &Stored =
3066 Second.getOperand(Second.getNumOperands() - 1);
3067 if (!Stored.isReg())
3068 return false;
3069 for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
3070 const MachineOperand &Op = First.getOperand(i);
3071 if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
3072 return true;
3073 }
3074 }
3075 return false;
3076}
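// Illustration of the allocframe special case above (operands are assumed for
// the example): the pair
//   First:  S2_allocframe ... #16
//   Second: S2_storeri_io %r29, 8, %r1
// can be bundled because Second's operand 0 is a use of R29, the register
// S2_allocframe defines in the same packet.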
3077
3078bool HexagonInstrInfo::doesNotReturn(const MachineInstr &CallMI) const {
3079 unsigned Opc = CallMI.getOpcode();
3080 return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
3081}
3082
3083bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const {
3084 for (auto &I : *B)
3085 if (I.isEHLabel())
3086 return true;
3087 return false;
3088}
3089
3090// Returns true if an instruction can be converted into a non-extended
3091// equivalent instruction.
3092bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr &MI) const {
3093 short NonExtOpcode;
3094  // Check if the instruction has a register form that uses a register in
3095  // place of the extended operand; if so, it has a non-extended equivalent.
3096 if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
3097 return true;
3098
3099 if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
3100 // Check addressing mode and retrieve non-ext equivalent instruction.
3101
3102 switch (getAddrMode(MI)) {
3103 case HexagonII::Absolute:
3104 // Load/store with absolute addressing mode can be converted into
3105 // base+offset mode.
3106 NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
3107 break;
3108 case HexagonII::BaseImmOffset:
3109 // Load/store with base+offset addressing mode can be converted into
3110      // base+register offset addressing mode. However, the left-shift operand
3111      // should be set to 0.
3112 NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
3113 break;
3114 case HexagonII::BaseLongOffset:
3115 NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
3116 break;
3117 default:
3118 return false;
3119 }
3120 if (NonExtOpcode < 0)
3121 return false;
3122 return true;
3123 }
3124 return false;
3125}
3126
3127bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr &MI) const {
3128 return Hexagon::getRealHWInstr(MI.getOpcode(),
3129 Hexagon::InstrType_Pseudo) >= 0;
3130}
3131
3132bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B)
3133 const {
3134 MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end();
3135 while (I != E) {
3136 if (I->isBarrier())
3137 return true;
3138 ++I;
3139 }
3140 return false;
3141}
3142
3143// Returns true if an LD insn can be promoted to a .cur load.
3144bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr &MI) const {
3145 const uint64_t F = MI.getDesc().TSFlags;
3146 return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
3147 Subtarget.hasV60Ops();
3148}
3149
3150// Returns true if an ST insn can be promoted to a new-value store.
3151bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
3152 if (MI.mayStore() && !Subtarget.useNewValueStores())
3153 return false;
3154
3155 const uint64_t F = MI.getDesc().TSFlags;
3156 return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
3157}
3158
3159bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
3160 const MachineInstr &ConsMI) const {
3161 // There is no stall when ProdMI is not a V60 vector.
3162 if (!isHVXVec(ProdMI))
3163 return false;
3164
3165 // There is no stall when ProdMI and ConsMI are not dependent.
3166 if (!isDependent(ProdMI, ConsMI))
3167 return false;
3168
3169 // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
3170 // are scheduled in consecutive packets.
3171 if (isVecUsableNextPacket(ProdMI, ConsMI))
3172 return false;
3173
3174 return true;
3175}
3176
3177bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
3178 MachineBasicBlock::const_instr_iterator BII) const {
3179  // There is no stall when MI is not a V60 vector.
3180 if (!isHVXVec(MI))
3181 return false;
3182
3183 MachineBasicBlock::const_instr_iterator MII = BII;
3184 MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
3185
3186 if (!MII->isBundle())
3187 return producesStall(*MII, MI);
3188
3189 for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3190 const MachineInstr &J = *MII;
3191 if (producesStall(J, MI))
3192 return true;
3193 }
3194 return false;
3195}
3196
3197bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr &MI,
3198 Register PredReg) const {
3199 for (const MachineOperand &MO : MI.operands()) {
3200 // Predicate register must be explicitly defined.
3201 if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
3202 return false;
3203 if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3204 return false;
3205 }
3206
3207  // Instructions that produce a late predicate cannot be used as sources of
3208  // dot-new.
3209 switch (MI.getOpcode()) {
3210 case Hexagon::A4_addp_c:
3211 case Hexagon::A4_subp_c:
3212 case Hexagon::A4_tlbmatch:
3213 case Hexagon::A5_ACS:
3214 case Hexagon::F2_sfinvsqrta:
3215 case Hexagon::F2_sfrecipa:
3216 case Hexagon::J2_endloop0:
3217 case Hexagon::J2_endloop01:
3218 case Hexagon::J2_ploop1si:
3219 case Hexagon::J2_ploop1sr:
3220 case Hexagon::J2_ploop2si:
3221 case Hexagon::J2_ploop2sr:
3222 case Hexagon::J2_ploop3si:
3223 case Hexagon::J2_ploop3sr:
3224 case Hexagon::S2_cabacdecbin:
3225 case Hexagon::S2_storew_locked:
3226 case Hexagon::S4_stored_locked:
3227 return false;
3228 }
3229 return true;
3230}
3231
3232bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3233 return Opcode == Hexagon::J2_jumpt ||
3234 Opcode == Hexagon::J2_jumptpt ||
3235 Opcode == Hexagon::J2_jumpf ||
3236 Opcode == Hexagon::J2_jumpfpt ||
3237 Opcode == Hexagon::J2_jumptnew ||
3238 Opcode == Hexagon::J2_jumpfnew ||
3239 Opcode == Hexagon::J2_jumptnewpt ||
3240 Opcode == Hexagon::J2_jumpfnewpt;
3241}
3242
3243bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
3244 if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3245 return false;
3246 return !isPredicatedTrue(Cond[0].getImm());
3247}
3248
3249unsigned HexagonInstrInfo::getAddrMode(const MachineInstr &MI) const {
3250 const uint64_t F = MI.getDesc().TSFlags;
3251 return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
3252}
3253
3254// Returns the base register in a memory access (load/store). The offset is
3255// returned in Offset and the access size is returned in AccessSize.
3256// If the base operand has a subregister or the offset field does not contain
3257// an immediate value, return nullptr.
3258MachineOperand *HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
3259 int64_t &Offset,
3260 unsigned &AccessSize) const {
3261 // Return if it is not a base+offset type instruction or a MemOp.
3262 if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
3263 getAddrMode(MI) != HexagonII::BaseLongOffset &&
3264 !isMemOp(MI) && !isPostIncrement(MI))
3265 return nullptr;
3266
3267 AccessSize = getMemAccessSize(MI);
3268
3269 unsigned BasePos = 0, OffsetPos = 0;
3270 if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
3271 return nullptr;
3272
3273 // Post increment updates its EA after the mem access,
3274 // so we need to treat its offset as zero.
3275 if (isPostIncrement(MI)) {
3276 Offset = 0;
3277 } else {
3278 const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
3279 if (!OffsetOp.isImm())
3280 return nullptr;
3281 Offset = OffsetOp.getImm();
3282 }
3283
3284 const MachineOperand &BaseOp = MI.getOperand(BasePos);
3285 if (BaseOp.getSubReg() != 0)
3286 return nullptr;
3287 return &const_cast<MachineOperand&>(BaseOp);
3288}
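// A minimal caller sketch (hypothetical use): note that for post-increment
// accesses the returned Offset is forced to zero.
//   int64_t Offset; unsigned AccessSize;
//   if (MachineOperand *Base = HII->getBaseAndOffset(MI, Offset, AccessSize))
//     ... Base->getReg() is the base register and AccessSize the access size
//         in bytes from getMemAccessSize(MI) ...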
3289
3290/// Return the position of the base and offset operands for this instruction.
3291bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr &MI,
3292 unsigned &BasePos, unsigned &OffsetPos) const {
3293 if (!isAddrModeWithOffset(MI) && !isPostIncrement(MI))
3294 return false;
3295
3296 // Deal with memops first.
3297 if (isMemOp(MI)) {
3298 BasePos = 0;
3299 OffsetPos = 1;
3300 } else if (MI.mayStore()) {
3301 BasePos = 0;
3302 OffsetPos = 1;
3303 } else if (MI.mayLoad()) {
3304 BasePos = 1;
3305 OffsetPos = 2;
3306 } else
3307 return false;
3308
3309 if (isPredicated(MI)) {
3310 BasePos++;
3311 OffsetPos++;
3312 }
3313 if (isPostIncrement(MI)) {
3314 BasePos++;
3315 OffsetPos++;
3316 }
3317
3318 if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3319 return false;
3320
3321 return true;
3322}
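// Worked example of the position arithmetic above (a sketch): for a predicated
// post-increment load such as L2_ploadrubt_pi, the load starts at BasePos = 1
// and OffsetPos = 2, the predicate bumps both to 2/3, and the post-increment
// bumps them again to 3/4. The final check then requires operand 3 to be a
// register and operand 4 to be an immediate.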
3323
3324// Returns the branching instructions in reverse order of their occurrence.
3325// e.g. jump_t t1 (i1)
3326// jump t2 (i2)
3327// Jumpers = {i2, i1}
3328SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
3329 MachineBasicBlock& MBB) const {
3330 SmallVector<MachineInstr*, 2> Jumpers;
3331 // If the block has no terminators, it just falls into the block after it.
3332 MachineBasicBlock::instr_iterator I = MBB.instr_end();
3333 if (I == MBB.instr_begin())
3334 return Jumpers;
3335
3336  // A basic block may look like this:
3337 //
3338 // [ insn
3339 // EH_LABEL
3340 // insn
3341 // insn
3342 // insn
3343 // EH_LABEL
3344 // insn ]
3345 //
3346  // It has two successors but does not have a terminator.
3347  // Don't know how to handle it.
3348 do {
3349 --I;
3350 if (I->isEHLabel())
3351 return Jumpers;
3352 } while (I != MBB.instr_begin());
3353
3354 I = MBB.instr_end();
3355 --I;
3356
3357 while (I->isDebugInstr()) {
3358 if (I == MBB.instr_begin())
3359 return Jumpers;
3360 --I;
3361 }
3362 if (!isUnpredicatedTerminator(*I))
3363 return Jumpers;
3364
3365 // Get the last instruction in the block.
3366 MachineInstr *LastInst = &*I;
3367 Jumpers.push_back(LastInst);
3368 MachineInstr *SecondLastInst = nullptr;
3369 // Find one more terminator if present.
3370 do {
3371 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3372 if (!SecondLastInst) {
3373 SecondLastInst = &*I;
3374 Jumpers.push_back(SecondLastInst);
3375 } else // This is a third branch.
3376 return Jumpers;
3377 }
3378 if (I == MBB.instr_begin())
3379 break;
3380 --I;
3381 } while (true);
3382 return Jumpers;
3383}
3384
3385// Returns the operand index for the constant-extended instruction.
3386unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr &MI) const {
3387 const uint64_t F = MI.getDesc().TSFlags;
3388 return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
3389}
3390
3391// See if instruction could potentially be a compound candidate.
3392// If so, return its group. Zero otherwise.
3393HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
3394 const MachineInstr &MI) const {
3395 Register DstReg, SrcReg, Src1Reg, Src2Reg;
3396
3397 switch (MI.getOpcode()) {
3398 default:
3399 return HexagonII::HCG_None;
3400 //
3401 // Compound pairs.
3402 // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3403 // "Rd16=#U6 ; jump #r9:2"
3404 // "Rd16=Rs16 ; jump #r9:2"
3405 //
3406 case Hexagon::C2_cmpeq:
3407 case Hexagon::C2_cmpgt:
3408 case Hexagon::C2_cmpgtu:
3409 DstReg = MI.getOperand(0).getReg();
3410 Src1Reg = MI.getOperand(1).getReg();
3411 Src2Reg = MI.getOperand(2).getReg();
3412 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3413 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3414 isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3415 return HexagonII::HCG_A;
3416 break;
3417 case Hexagon::C2_cmpeqi:
3418 case Hexagon::C2_cmpgti:
3419 case Hexagon::C2_cmpgtui:
3420 // P0 = cmp.eq(Rs,#u2)
3421 DstReg = MI.getOperand(0).getReg();
3422 SrcReg = MI.getOperand(1).getReg();
3423 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3424 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3425 isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3426 ((isUInt<5>(MI.getOperand(2).getImm())) ||
3427 (MI.getOperand(2).getImm() == -1)))
3428 return HexagonII::HCG_A;
3429 break;
3430 case Hexagon::A2_tfr:
3431 // Rd = Rs
3432 DstReg = MI.getOperand(0).getReg();
3433 SrcReg = MI.getOperand(1).getReg();
3434 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3435 return HexagonII::HCG_A;
3436 break;
3437 case Hexagon::A2_tfrsi:
3438 // Rd = #u6
3439 // Do not test for #u6 size since the const is getting extended
3440 // regardless and compound could be formed.
3441 DstReg = MI.getOperand(0).getReg();
3442 if (isIntRegForSubInst(DstReg))
3443 return HexagonII::HCG_A;
3444 break;
3445 case Hexagon::S2_tstbit_i:
3446 DstReg = MI.getOperand(0).getReg();
3447 Src1Reg = MI.getOperand(1).getReg();
3448 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3449 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3450 MI.getOperand(2).isImm() &&
3451 isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3452 return HexagonII::HCG_A;
3453 break;
3454 // The fact that .new form is used pretty much guarantees
3455 // that predicate register will match. Nevertheless,
3456 // there could be some false positives without additional
3457 // checking.
3458 case Hexagon::J2_jumptnew:
3459 case Hexagon::J2_jumpfnew:
3460 case Hexagon::J2_jumptnewpt:
3461 case Hexagon::J2_jumpfnewpt:
3462 Src1Reg = MI.getOperand(0).getReg();
3463 if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3464 (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3465 return HexagonII::HCG_B;
3466 break;
3467 // Transfer and jump:
3468 // Rd=#U6 ; jump #r9:2
3469 // Rd=Rs ; jump #r9:2
3470 // Do not test for jump range here.
3471 case Hexagon::J2_jump:
3472 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3473 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3474 return HexagonII::HCG_C;
3475 }
3476
3477 return HexagonII::HCG_None;
3478}
3479
3480// Returns -1 when there is no opcode found.
3481unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr &GA,
3482 const MachineInstr &GB) const {
3483  assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
3484  assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
3485 if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3486 (GB.getOpcode() != Hexagon::J2_jumptnew))
3487 return -1u;
3488 Register DestReg = GA.getOperand(0).getReg();
3489 if (!GB.readsRegister(DestReg))
3490 return -1u;
3491 if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
3492 return -1u;
3493 // The value compared against must be either u5 or -1.
3494 const MachineOperand &CmpOp = GA.getOperand(2);
3495 if (!CmpOp.isImm())
3496 return -1u;
3497 int V = CmpOp.getImm();
3498 if (V == -1)
3499 return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
3500 : Hexagon::J4_cmpeqn1_tp1_jump_nt;
3501 if (!isUInt<5>(V))
3502 return -1u;
3503 return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
3504 : Hexagon::J4_cmpeqi_tp1_jump_nt;
3505}
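// Worked example (registers assumed for illustration): for
//   GA: %p0 = C2_cmpeqi %r7, 3
//   GB: J2_jumptnew %p0, %bb.1
// the immediate 3 fits in u5, so the pair compounds to J4_cmpeqi_tp0_jump_nt;
// with an immediate of -1 it becomes J4_cmpeqn1_tp0_jump_nt, and any other
// out-of-range value yields -1u.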
3506
3507// Returns -1 if there is no opcode found.
3508int HexagonInstrInfo::getDuplexOpcode(const MachineInstr &MI,
3509 bool ForBigCore) const {
3510  // Static table to switch the opcodes across Tiny Core and Big Core.
3511  // dup_ opcodes are Big Core opcodes.
3512  // NOTE: There are special instructions that need to be handled later:
3513  // L4_return* instructions will only occupy SLOT0 (on Big Core too).
3514  // PS_jmpret - this pseudo translates to J2_jumpr, which occupies only SLOT2.
3515  // The compiler needs to map the root instruction to L6_return_map_to_raw,
3516  // which can go in any slot.
3517 static const std::map<unsigned, unsigned> DupMap = {
3518 {Hexagon::A2_add, Hexagon::dup_A2_add},
3519 {Hexagon::A2_addi, Hexagon::dup_A2_addi},
3520 {Hexagon::A2_andir, Hexagon::dup_A2_andir},
3521 {Hexagon::A2_combineii, Hexagon::dup_A2_combineii},
3522 {Hexagon::A2_sxtb, Hexagon::dup_A2_sxtb},
3523 {Hexagon::A2_sxth, Hexagon::dup_A2_sxth},
3524 {Hexagon::A2_tfr, Hexagon::dup_A2_tfr},
3525 {Hexagon::A2_tfrsi, Hexagon::dup_A2_tfrsi},
3526 {Hexagon::A2_zxtb, Hexagon::dup_A2_zxtb},
3527 {Hexagon::A2_zxth, Hexagon::dup_A2_zxth},
3528 {Hexagon::A4_combineii, Hexagon::dup_A4_combineii},
3529 {Hexagon::A4_combineir, Hexagon::dup_A4_combineir},
3530 {Hexagon::A4_combineri, Hexagon::dup_A4_combineri},
3531 {Hexagon::C2_cmoveif, Hexagon::dup_C2_cmoveif},
3532 {Hexagon::C2_cmoveit, Hexagon::dup_C2_cmoveit},
3533 {Hexagon::C2_cmovenewif, Hexagon::dup_C2_cmovenewif},
3534 {Hexagon::C2_cmovenewit, Hexagon::dup_C2_cmovenewit},
3535 {Hexagon::C2_cmpeqi, Hexagon::dup_C2_cmpeqi},
3536 {Hexagon::L2_deallocframe, Hexagon::dup_L2_deallocframe},
3537 {Hexagon::L2_loadrb_io, Hexagon::dup_L2_loadrb_io},
3538 {Hexagon::L2_loadrd_io, Hexagon::dup_L2_loadrd_io},
3539 {Hexagon::L2_loadrh_io, Hexagon::dup_L2_loadrh_io},
3540 {Hexagon::L2_loadri_io, Hexagon::dup_L2_loadri_io},
3541 {Hexagon::L2_loadrub_io, Hexagon::dup_L2_loadrub_io},
3542 {Hexagon::L2_loadruh_io, Hexagon::dup_L2_loadruh_io},
3543 {Hexagon::S2_allocframe, Hexagon::dup_S2_allocframe},
3544 {Hexagon::S2_storerb_io, Hexagon::dup_S2_storerb_io},
3545 {Hexagon::S2_storerd_io, Hexagon::dup_S2_storerd_io},
3546 {Hexagon::S2_storerh_io, Hexagon::dup_S2_storerh_io},
3547 {Hexagon::S2_storeri_io, Hexagon::dup_S2_storeri_io},
3548 {Hexagon::S4_storeirb_io, Hexagon::dup_S4_storeirb_io},
3549 {Hexagon::S4_storeiri_io, Hexagon::dup_S4_storeiri_io},
3550 };
3551 unsigned OpNum = MI.getOpcode();
3552 // Conversion to Big core.
3553 if (ForBigCore) {
3554 auto Iter = DupMap.find(OpNum);
3555 if (Iter != DupMap.end())
3556 return Iter->second;
3557 } else { // Conversion to Tiny core.
3558 for (const auto &Iter : DupMap)
3559 if (Iter.second == OpNum)
3560 return Iter.first;
3561 }
3562 return -1;
3563}
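// Usage sketch (hypothetical instructions): for an A2_addi,
// getDuplexOpcode(MI, /*ForBigCore=*/true) returns dup_A2_addi via the map
// lookup; called with ForBigCore == false on a dup_A2_addi it scans the map
// values and returns A2_addi. Any opcode not in the table yields -1.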
3564
3565int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3566 enum Hexagon::PredSense inPredSense;
3567 inPredSense = invertPredicate ? Hexagon::PredSense_false :
3568 Hexagon::PredSense_true;
3569 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3570 if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3571 return CondOpcode;
3572
3573  llvm_unreachable("Unexpected predicable instruction");
3574}
3575
3576// Return the .cur form of the instruction for a given vector load.
3577int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
3578 switch (MI.getOpcode()) {
3579  default: llvm_unreachable("Unknown .cur type");
3580 case Hexagon::V6_vL32b_pi:
3581 return Hexagon::V6_vL32b_cur_pi;
3582 case Hexagon::V6_vL32b_ai:
3583 return Hexagon::V6_vL32b_cur_ai;
3584 case Hexagon::V6_vL32b_nt_pi:
3585 return Hexagon::V6_vL32b_nt_cur_pi;
3586 case Hexagon::V6_vL32b_nt_ai:
3587 return Hexagon::V6_vL32b_nt_cur_ai;
3588 case Hexagon::V6_vL32b_ppu:
3589 return Hexagon::V6_vL32b_cur_ppu;
3590 case Hexagon::V6_vL32b_nt_ppu:
3591 return Hexagon::V6_vL32b_nt_cur_ppu;
3592 }
3593 return 0;
3594}
3595
3596// Return the regular version of the .cur instruction.
3597int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
3598 switch (MI.getOpcode()) {
3599  default: llvm_unreachable("Unknown .cur type");
3600 case Hexagon::V6_vL32b_cur_pi:
3601 return Hexagon::V6_vL32b_pi;
3602 case Hexagon::V6_vL32b_cur_ai:
3603 return Hexagon::V6_vL32b_ai;
3604 case Hexagon::V6_vL32b_nt_cur_pi:
3605 return Hexagon::V6_vL32b_nt_pi;
3606 case Hexagon::V6_vL32b_nt_cur_ai:
3607 return Hexagon::V6_vL32b_nt_ai;
3608 case Hexagon::V6_vL32b_cur_ppu:
3609 return Hexagon::V6_vL32b_ppu;
3610 case Hexagon::V6_vL32b_nt_cur_ppu:
3611 return Hexagon::V6_vL32b_nt_ppu;
3612 }
3613 return 0;
3614}
3615
3616// The diagram below shows the steps involved in the conversion of a predicated
3617// store instruction to its .new predicated new-value form.
3618//
3619// Note: It doesn't include conditional new-value stores as they can't be
3620// converted to .new predicate.
3621//
3622// p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3623// ^ ^
3624// / \ (not OK. it will cause new-value store to be
3625// / X conditional on p0.new while R2 producer is
3626// / \ on p0)
3627// / \.
3628// p.new store p.old NV store
3629// [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
3630// ^ ^
3631// \ /
3632// \ /
3633// \ /
3634// p.old store
3635// [if (p0)memw(R0+#0)=R2]
3636//
3637// The following set of instructions further explains the scenario where
3638// conditional new-value store becomes invalid when promoted to .new predicate
3639// form.
3640//
3641// { 1) if (p0) r0 = add(r1, r2)
3642// 2) p0 = cmp.eq(r3, #0) }
3643//
3644// 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3645// the first two instructions because in instr 1, r0 is conditional on old value
3646// of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3647// is not valid for new-value stores.
3648// Predicated new-value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3649// from the "Conditional Store" list because a predicated new-value store
3650// would NOT be promoted to a double dot-new store. See the diagram below.
3651// This function returns true for those stores that are predicated but not
3652// yet promoted to predicate dot-new instructions.
3653//
3654// +---------------------+
3655// /-----| if (p0) memw(..)=r0 |---------\~
3656// || +---------------------+ ||
3657// promote || /\ /\ || promote
3658// || /||\ /||\ ||
3659// \||/ demote || \||/
3660// \/ || || \/
3661// +-------------------------+ || +-------------------------+
3662// | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3663// +-------------------------+ || +-------------------------+
3664// || || ||
3665// || demote \||/
3666// promote || \/ NOT possible
3667// || || /\~
3668// \||/ || /||\~
3669// \/ || ||
3670// +-----------------------------+
3671// | if (p0.new) memw(..)=r0.new |
3672// +-----------------------------+
3673// Double Dot New Store
3674//
3675// Returns the most basic instruction for the .new predicated instructions and
3676// new-value stores.
3677// For example, all of the following instructions will be converted back to the
3678// same instruction:
3679// 1) if (p0.new) memw(R0+#0) = R1.new --->
3680// 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3681// 3) if (p0.new) memw(R0+#0) = R1 --->
3682//
3683// To understand the translation of instruction 1 to its original form, consider
3684// a packet with 3 instructions.
3685// { p0 = cmp.eq(R0,R1)
3686// if (p0.new) R2 = add(R3, R4)
3687// R5 = add (R3, R1)
3688// }
3689// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3690//
3691// This instruction can be part of the previous packet only if both p0 and R2
3692// are promoted to .new values. This promotion happens in steps, first
3693// predicate register is promoted to .new and in the next iteration R2 is
3694// promoted. Therefore, in case of dependence check failure (due to R5) during
3695// next iteration, it should be converted back to its most basic form.
3696
3697// Return the new value instruction for a given store.
3698int HexagonInstrInfo::getDotNewOp(const MachineInstr &MI) const {
3699 int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3700 if (NVOpcode >= 0) // Valid new-value store instruction.
3701 return NVOpcode;
3702
3703 switch (MI.getOpcode()) {
3704 default:
3705 report_fatal_error(Twine("Unknown .new type: ") +
3706 std::to_string(MI.getOpcode()));
3707 case Hexagon::S4_storerb_ur:
3708 return Hexagon::S4_storerbnew_ur;
3709
3710 case Hexagon::S2_storerb_pci:
3711 return Hexagon::S2_storerb_pci;
3712
3713 case Hexagon::S2_storeri_pci:
3714 return Hexagon::S2_storeri_pci;
3715
3716 case Hexagon::S2_storerh_pci:
3717 return Hexagon::S2_storerh_pci;
3718
3719 case Hexagon::S2_storerd_pci:
3720 return Hexagon::S2_storerd_pci;
3721
3722 case Hexagon::S2_storerf_pci:
3723 return Hexagon::S2_storerf_pci;
3724
3725 case Hexagon::V6_vS32b_ai:
3726 return Hexagon::V6_vS32b_new_ai;
3727
3728 case Hexagon::V6_vS32b_pi:
3729 return Hexagon::V6_vS32b_new_pi;
3730 }
3731 return 0;
3732}
3733
3734// Returns the opcode to use when converting MI, which is a conditional jump,
3735// into a conditional instruction which uses the .new value of the predicate.
3736// We also use branch probabilities to add a hint to the jump.
3737// If MBPI is null, all edges will be treated as equally likely for the
3738// purposes of establishing a predication hint.
3739int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr &MI,
3740 const MachineBranchProbabilityInfo *MBPI) const {
3741 // We assume that block can have at most two successors.
3742 const MachineBasicBlock *Src = MI.getParent();
3743 const MachineOperand &BrTarget = MI.getOperand(1);
3744 bool Taken = false;
3745 const BranchProbability OneHalf(1, 2);
3746
3747 auto getEdgeProbability = [MBPI] (const MachineBasicBlock *Src,
3748 const MachineBasicBlock *Dst) {
3749 if (MBPI)
3750 return MBPI->getEdgeProbability(Src, Dst);
3751 return BranchProbability(1, Src->succ_size());
3752 };
3753
3754 if (BrTarget.isMBB()) {
3755 const MachineBasicBlock *Dst = BrTarget.getMBB();
3756 Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3757 } else {
3758 // The branch target is not a basic block (most likely a function).
3759 // Since BPI only gives probabilities for targets that are basic blocks,
3760    // try to identify another target of this branch (potentially a
3761    // fall-through) and check the probability of that target.
3762 //
3763 // The only handled branch combinations are:
3764 // - one conditional branch,
3765 // - one conditional branch followed by one unconditional branch.
3766 // Otherwise, assume not-taken.
3767    assert(MI.isConditionalBranch());
3768 const MachineBasicBlock &B = *MI.getParent();
3769 bool SawCond = false, Bad = false;
3770 for (const MachineInstr &I : B) {
3771 if (!I.isBranch())
3772 continue;
3773 if (I.isConditionalBranch()) {
3774 SawCond = true;
3775 if (&I != &MI) {
3776 Bad = true;
3777 break;
3778 }
3779 }
3780 if (I.isUnconditionalBranch() && !SawCond) {
3781 Bad = true;
3782 break;
3783 }
3784 }
3785 if (!Bad) {
3786 MachineBasicBlock::const_instr_iterator It(MI);
3787 MachineBasicBlock::const_instr_iterator NextIt = std::next(It);
3788 if (NextIt == B.instr_end()) {
3789 // If this branch is the last, look for the fall-through block.
3790 for (const MachineBasicBlock *SB : B.successors()) {
3791 if (!B.isLayoutSuccessor(SB))
3792 continue;
3793 Taken = getEdgeProbability(Src, SB) < OneHalf;
3794 break;
3795 }
3796 } else {
3797        assert(NextIt->isUnconditionalBranch());
3798 // Find the first MBB operand and assume it's the target.
3799 const MachineBasicBlock *BT = nullptr;
3800 for (const MachineOperand &Op : NextIt->operands()) {
3801 if (!Op.isMBB())
3802 continue;
3803 BT = Op.getMBB();
3804 break;
3805 }
3806 Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3807 }
3808 } // if (!Bad)
3809 }
3810
3811 // The Taken flag should be set to something reasonable by this point.
3812
3813 switch (MI.getOpcode()) {
3814 case Hexagon::J2_jumpt:
3815 return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3816 case Hexagon::J2_jumpf:
3817 return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3818
3819 default:
3820    llvm_unreachable("Unexpected jump instruction.");
3821 }
3822}
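// Example of the hint selection above (edge probabilities are assumed): if MI
// is "J2_jumpt %p0, %bb.2" and the edge to %bb.2 has probability >= 1/2, the
// result is J2_jumptnewpt (predicted taken); otherwise J2_jumptnew. J2_jumpf
// is handled symmetrically.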
3823
3824// Return .new predicate version for an instruction.
3825int HexagonInstrInfo::getDotNewPredOp(const MachineInstr &MI,
3826 const MachineBranchProbabilityInfo *MBPI) const {
3827 switch (MI.getOpcode()) {
3828  // Conditional Jumps
3829 case Hexagon::J2_jumpt:
3830 case Hexagon::J2_jumpf:
3831 return getDotNewPredJumpOp(MI, MBPI);
3832 }
3833
3834 int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3835 if (NewOpcode >= 0)
3836 return NewOpcode;
3837 return 0;
3838}
3839
3840int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const {
3841 int NewOp = MI.getOpcode();
3842 if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3843 NewOp = Hexagon::getPredOldOpcode(NewOp);
3844 // All Hexagon architectures have prediction bits on dot-new branches,
3845 // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
3846 // to pick the right opcode when converting back to dot-old.
3847 if (!Subtarget.hasFeature(Hexagon::ArchV60)) {
3848 switch (NewOp) {
3849 case Hexagon::J2_jumptpt:
3850 NewOp = Hexagon::J2_jumpt;
3851 break;
3852 case Hexagon::J2_jumpfpt:
3853 NewOp = Hexagon::J2_jumpf;
3854 break;
3855 case Hexagon::J2_jumprtpt:
3856 NewOp = Hexagon::J2_jumprt;
3857 break;
3858 case Hexagon::J2_jumprfpt:
3859 NewOp = Hexagon::J2_jumprf;
3860 break;
3861 }
3862 }
3863    assert(NewOp >= 0 &&
3864           "Couldn't change predicate new instruction to its old form.");
3865 }
3866
3867 if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3868 NewOp = Hexagon::getNonNVStore(NewOp);
3869    assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3870 }
3871
3872 if (Subtarget.hasV60Ops())
3873 return NewOp;
3874
3875 // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps.
3876 switch (NewOp) {
3877 case Hexagon::J2_jumpfpt:
3878 return Hexagon::J2_jumpf;
3879 case Hexagon::J2_jumptpt:
3880 return Hexagon::J2_jumpt;
3881 case Hexagon::J2_jumprfpt:
3882 return Hexagon::J2_jumprf;
3883 case Hexagon::J2_jumprtpt:
3884 return Hexagon::J2_jumprt;
3885 }
3886 return NewOp;
3887}
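// Example of the pre-V60 remapping above (assuming getPredOldOpcode maps the
// .new jump back to its "taken" form): J2_jumptnewpt first becomes J2_jumptpt,
// and because architectures before V60 lack prediction bits on dot-old
// branches it is further demoted to J2_jumpt.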
3888
3889// See if instruction could potentially be a duplex candidate.
3890// If so, return its group. Zero otherwise.
3891HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3892 const MachineInstr &MI) const {
3893 Register DstReg, SrcReg, Src1Reg, Src2Reg;
3894 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3895
3896 switch (MI.getOpcode()) {
3897 default:
3898 return HexagonII::HSIG_None;
3899 //
3900 // Group L1:
3901 //
3902 // Rd = memw(Rs+#u4:2)
3903 // Rd = memub(Rs+#u4:0)
3904 case Hexagon::L2_loadri_io:
3905 case Hexagon::dup_L2_loadri_io:
3906 DstReg = MI.getOperand(0).getReg();
3907 SrcReg = MI.getOperand(1).getReg();
3908 // Special case this one from Group L2.
3909 // Rd = memw(r29+#u5:2)
3910 if (isIntRegForSubInst(DstReg)) {
3911 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3912 HRI.getStackRegister() == SrcReg &&
3913 MI.getOperand(2).isImm() &&
3914 isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3915 return HexagonII::HSIG_L2;
3916 // Rd = memw(Rs+#u4:2)
3917 if (isIntRegForSubInst(SrcReg) &&
3918 (MI.getOperand(2).isImm() &&
3919 isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3920 return HexagonII::HSIG_L1;
3921 }
3922 break;
3923 case Hexagon::L2_loadrub_io:
3924 case Hexagon::dup_L2_loadrub_io:
3925 // Rd = memub(Rs+#u4:0)
3926 DstReg = MI.getOperand(0).getReg();
3927 SrcReg = MI.getOperand(1).getReg();
3928 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3929 MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3930 return HexagonII::HSIG_L1;
3931 break;
3932 //
3933 // Group L2:
3934 //
3935 // Rd = memh/memuh(Rs+#u3:1)
3936 // Rd = memb(Rs+#u3:0)
3937 // Rd = memw(r29+#u5:2) - Handled above.
3938 // Rdd = memd(r29+#u5:3)
3939 // deallocframe
3940 // [if ([!]p0[.new])] dealloc_return
3941 // [if ([!]p0[.new])] jumpr r31
3942 case Hexagon::L2_loadrh_io:
3943 case Hexagon::L2_loadruh_io:
3944 case Hexagon::dup_L2_loadrh_io:
3945 case Hexagon::dup_L2_loadruh_io:
3946 // Rd = memh/memuh(Rs+#u3:1)
3947 DstReg = MI.getOperand(0).getReg();
3948 SrcReg = MI.getOperand(1).getReg();
3949 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3950 MI.getOperand(2).isImm() &&
3951 isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3952 return HexagonII::HSIG_L2;
3953 break;
3954 case Hexagon::L2_loadrb_io:
3955 case Hexagon::dup_L2_loadrb_io:
3956 // Rd = memb(Rs+#u3:0)
3957 DstReg = MI.getOperand(0).getReg();
3958 SrcReg = MI.getOperand(1).getReg();
3959 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3960 MI.getOperand(2).isImm() &&
3961 isUInt<3>(MI.getOperand(2).getImm()))
3962 return HexagonII::HSIG_L2;
3963 break;
3964 case Hexagon::L2_loadrd_io:
3965 case Hexagon::dup_L2_loadrd_io:
3966 // Rdd = memd(r29+#u5:3)
3967 DstReg = MI.getOperand(0).getReg();
3968 SrcReg = MI.getOperand(1).getReg();
3969 if (isDblRegForSubInst(DstReg, HRI) &&
3970 Hexagon::IntRegsRegClass.contains(SrcReg) &&
3971 HRI.getStackRegister() == SrcReg &&
3972 MI.getOperand(2).isImm() &&
3973 isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3974 return HexagonII::HSIG_L2;
3975 break;
3976 // dealloc_return is not documented in Hexagon Manual, but marked
3977 // with A_SUBINSN attribute in iset_v4classic.py.
3978 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3979 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3980 case Hexagon::L4_return:
3981 case Hexagon::L2_deallocframe:
3982 case Hexagon::dup_L2_deallocframe:
3983 return HexagonII::HSIG_L2;
3984 case Hexagon::EH_RETURN_JMPR:
3985 case Hexagon::PS_jmpret:
3986 case Hexagon::SL2_jumpr31:
3987 // jumpr r31
3988 // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0
3989 DstReg = MI.getOperand(0).getReg();
3990 if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3991 return HexagonII::HSIG_L2;
3992 break;
3993 case Hexagon::PS_jmprett:
3994 case Hexagon::PS_jmpretf:
3995 case Hexagon::PS_jmprettnewpt:
3996 case Hexagon::PS_jmpretfnewpt:
3997 case Hexagon::PS_jmprettnew:
3998 case Hexagon::PS_jmpretfnew:
3999 case Hexagon::SL2_jumpr31_t:
4000 case Hexagon::SL2_jumpr31_f:
4001 case Hexagon::SL2_jumpr31_tnew:
4002 case Hexagon::SL2_jumpr31_fnew:
4003 DstReg = MI.getOperand(1).getReg();
4004 SrcReg = MI.getOperand(0).getReg();
4005 // [if ([!]p0[.new])] jumpr r31
4006 if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
4007 (Hexagon::P0 == SrcReg)) &&
4008 (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
4009 return HexagonII::HSIG_L2;
4010 break;
4011 case Hexagon::L4_return_t:
4012 case Hexagon::L4_return_f:
4013 case Hexagon::L4_return_tnew_pnt:
4014 case Hexagon::L4_return_fnew_pnt:
4015 case Hexagon::L4_return_tnew_pt:
4016 case Hexagon::L4_return_fnew_pt:
4017 // [if ([!]p0[.new])] dealloc_return
4018 SrcReg = MI.getOperand(0).getReg();
4019 if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
4020 return HexagonII::HSIG_L2;
4021 break;
4022 //
4023 // Group S1:
4024 //
4025 // memw(Rs+#u4:2) = Rt
4026 // memb(Rs+#u4:0) = Rt
4027 case Hexagon::S2_storeri_io:
4028 case Hexagon::dup_S2_storeri_io:
4029 // Special case this one from Group S2.
4030 // memw(r29+#u5:2) = Rt
4031 Src1Reg = MI.getOperand(0).getReg();
4032 Src2Reg = MI.getOperand(2).getReg();
4033 if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4034 isIntRegForSubInst(Src2Reg) &&
4035 HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4036 isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
4037 return HexagonII::HSIG_S2;
4038 // memw(Rs+#u4:2) = Rt
4039 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4040 MI.getOperand(1).isImm() &&
4041 isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
4042 return HexagonII::HSIG_S1;
4043 break;
4044 case Hexagon::S2_storerb_io:
4045 case Hexagon::dup_S2_storerb_io:
4046 // memb(Rs+#u4:0) = Rt
4047 Src1Reg = MI.getOperand(0).getReg();
4048 Src2Reg = MI.getOperand(2).getReg();
4049 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4050 MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
4051 return HexagonII::HSIG_S1;
4052 break;
4053 //
4054 // Group S2:
4055 //
4056 // memh(Rs+#u3:1) = Rt
4057 // memw(r29+#u5:2) = Rt
4058 // memd(r29+#s6:3) = Rtt
4059 // memw(Rs+#u4:2) = #U1
4060 // memb(Rs+#u4) = #U1
4061 // allocframe(#u5:3)
4062 case Hexagon::S2_storerh_io:
4063 case Hexagon::dup_S2_storerh_io:
4064 // memh(Rs+#u3:1) = Rt
4065 Src1Reg = MI.getOperand(0).getReg();
4066 Src2Reg = MI.getOperand(2).getReg();
4067 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4068 MI.getOperand(1).isImm() &&
4069 isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
4070 return HexagonII::HSIG_S1;
4071 break;
4072 case Hexagon::S2_storerd_io:
4073 case Hexagon::dup_S2_storerd_io:
4074 // memd(r29+#s6:3) = Rtt
4075 Src1Reg = MI.getOperand(0).getReg();
4076 Src2Reg = MI.getOperand(2).getReg();
4077 if (isDblRegForSubInst(Src2Reg, HRI) &&
4078 Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4079 HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4080 isShiftedInt<6,3>(MI.getOperand(1).getImm()))
4081 return HexagonII::HSIG_S2;
4082 break;
4083 case Hexagon::S4_storeiri_io:
4084 case Hexagon::dup_S4_storeiri_io:
4085 // memw(Rs+#u4:2) = #U1
4086 Src1Reg = MI.getOperand(0).getReg();
4087 if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
4088 isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
4089 MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4090 return HexagonII::HSIG_S2;
4091 break;
4092 case Hexagon::S4_storeirb_io:
4093 case Hexagon::dup_S4_storeirb_io:
4094 // memb(Rs+#u4) = #U1
4095 Src1Reg = MI.getOperand(0).getReg();
4096 if (isIntRegForSubInst(Src1Reg) &&
4097 MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
4098 MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4099 return HexagonII::HSIG_S2;
4100 break;
4101 case Hexagon::S2_allocframe:
4102 case Hexagon::dup_S2_allocframe:
4103 if (MI.getOperand(2).isImm() &&
4104 isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4105 return HexagonII::HSIG_S1;
4106 break;
4107 //
4108 // Group A:
4109 //
4110 // Rx = add(Rx,#s7)
4111 // Rd = Rs
4112 // Rd = #u6
4113 // Rd = #-1
4114 // if ([!]P0[.new]) Rd = #0
4115 // Rd = add(r29,#u6:2)
4116 // Rx = add(Rx,Rs)
4117 // P0 = cmp.eq(Rs,#u2)
4118 // Rdd = combine(#0,Rs)
4119 // Rdd = combine(Rs,#0)
4120 // Rdd = combine(#u2,#U2)
4121 // Rd = add(Rs,#1)
4122 // Rd = add(Rs,#-1)
4123 // Rd = sxth/sxtb/zxtb/zxth(Rs)
4124 // Rd = and(Rs,#1)
4125 case Hexagon::A2_addi:
4126 case Hexagon::dup_A2_addi:
4127 DstReg = MI.getOperand(0).getReg();
4128 SrcReg = MI.getOperand(1).getReg();
4129 if (isIntRegForSubInst(DstReg)) {
4130 // Rd = add(r29,#u6:2)
4131 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
4132 HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
4133 isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
4134 return HexagonII::HSIG_A;
4135 // Rx = add(Rx,#s7)
4136 if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
4137 isInt<7>(MI.getOperand(2).getImm()))
4138 return HexagonII::HSIG_A;
4139 // Rd = add(Rs,#1)
4140 // Rd = add(Rs,#-1)
4141 if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
4142 ((MI.getOperand(2).getImm() == 1) ||
4143 (MI.getOperand(2).getImm() == -1)))
4144 return HexagonII::HSIG_A;
4145 }
4146 break;
4147 case Hexagon::A2_add:
4148 case Hexagon::dup_A2_add:
4149 // Rx = add(Rx,Rs)
4150 DstReg = MI.getOperand(0).getReg();
4151 Src1Reg = MI.getOperand(1).getReg();
4152 Src2Reg = MI.getOperand(2).getReg();
4153 if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
4154 isIntRegForSubInst(Src2Reg))
4155 return HexagonII::HSIG_A;
4156 break;
4157 case Hexagon::A2_andir:
4158 case Hexagon::dup_A2_andir:
4159 // Same as zxtb.
4160 // Rd16=and(Rs16,#255)
4161 // Rd16=and(Rs16,#1)
4162 DstReg = MI.getOperand(0).getReg();
4163 SrcReg = MI.getOperand(1).getReg();
4164 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
4165 MI.getOperand(2).isImm() &&
4166 ((MI.getOperand(2).getImm() == 1) ||
4167 (MI.getOperand(2).getImm() == 255)))
4168 return HexagonII::HSIG_A;
4169 break;
4170 case Hexagon::A2_tfr:
4171 case Hexagon::dup_A2_tfr:
4172 // Rd = Rs
4173 DstReg = MI.getOperand(0).getReg();
4174 SrcReg = MI.getOperand(1).getReg();
4175 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4176 return HexagonII::HSIG_A;
4177 break;
4178 case Hexagon::A2_tfrsi:
4179 case Hexagon::dup_A2_tfrsi:
4180 // Rd = #u6
4181 // Do not test for #u6 size since the const is getting extended
4182 // regardless and compound could be formed.
4183 // Rd = #-1
4184 DstReg = MI.getOperand(0).getReg();
4185 if (isIntRegForSubInst(DstReg))
4186 return HexagonII::HSIG_A;
4187 break;
4188 case Hexagon::C2_cmoveit:
4189 case Hexagon::C2_cmovenewit:
4190 case Hexagon::C2_cmoveif:
4191 case Hexagon::C2_cmovenewif:
4192 case Hexagon::dup_C2_cmoveit:
4193 case Hexagon::dup_C2_cmovenewit:
4194 case Hexagon::dup_C2_cmoveif:
4195 case Hexagon::dup_C2_cmovenewif:
4196 // if ([!]P0[.new]) Rd = #0
4197 // Actual form:
4198 // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
4199 DstReg = MI.getOperand(0).getReg();
4200 SrcReg = MI.getOperand(1).getReg();
4201 if (isIntRegForSubInst(DstReg) &&
4202 Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
4203 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
4204 return HexagonII::HSIG_A;
4205 break;
4206 case Hexagon::C2_cmpeqi:
4207 case Hexagon::dup_C2_cmpeqi:
4208 // P0 = cmp.eq(Rs,#u2)
4209 DstReg = MI.getOperand(0).getReg();
4210 SrcReg = MI.getOperand(1).getReg();
4211 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
4212 Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
4213 MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
4214 return HexagonII::HSIG_A;
4215 break;
4216 case Hexagon::A2_combineii:
4217 case Hexagon::A4_combineii:
4218 case Hexagon::dup_A2_combineii:
4219 case Hexagon::dup_A4_combineii:
4220 // Rdd = combine(#u2,#U2)
4221 DstReg = MI.getOperand(0).getReg();
4222 if (isDblRegForSubInst(DstReg, HRI) &&
4223 ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
4224 (MI.getOperand(1).isGlobal() &&
4225 isUInt<2>(MI.getOperand(1).getOffset()))) &&
4226 ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
4227 (MI.getOperand(2).isGlobal() &&
4228 isUInt<2>(MI.getOperand(2).getOffset()))))
4229 return HexagonII::HSIG_A;
4230 break;
4231 case Hexagon::A4_combineri:
4232 case Hexagon::dup_A4_combineri:
4233 // Rdd = combine(Rs,#0)
4234 // Rdd = combine(Rs,#0)
4235 DstReg = MI.getOperand(0).getReg();
4236 SrcReg = MI.getOperand(1).getReg();
4237 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4238 ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
4239 (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
4240 return HexagonII::HSIG_A;
4241 break;
4242 case Hexagon::A4_combineir:
4243 case Hexagon::dup_A4_combineir:
4244 // Rdd = combine(#0,Rs)
4245 DstReg = MI.getOperand(0).getReg();
4246 SrcReg = MI.getOperand(2).getReg();
4247 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4248 ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
4249 (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
4250 return HexagonII::HSIG_A;
4251 break;
4252 case Hexagon::A2_sxtb:
4253 case Hexagon::A2_sxth:
4254 case Hexagon::A2_zxtb:
4255 case Hexagon::A2_zxth:
4256 case Hexagon::dup_A2_sxtb:
4257 case Hexagon::dup_A2_sxth:
4258 case Hexagon::dup_A2_zxtb:
4259 case Hexagon::dup_A2_zxth:
4260 // Rd = sxth/sxtb/zxtb/zxth(Rs)
4261 DstReg = MI.getOperand(0).getReg();
4262 SrcReg = MI.getOperand(1).getReg();
4263 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4264 return HexagonII::HSIG_A;
4265 break;
4266 }
4267
4268 return HexagonII::HSIG_None;
4269}
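// Illustrative example (summarizing the checks above; registers chosen for
// illustration): a register transfer "r1 = r16" (A2_tfr) reads and writes only
// sub-instruction-eligible registers (r0-r7, r16-r23), so it is classified as
// HSIG_A and may later be paired with another eligible sub-instruction to form
// a duplex.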
4270
4271short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr &MI) const {
4272 return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
4273}
4274
4275unsigned HexagonInstrInfo::getInstrTimingClassLatency(
4276 const InstrItineraryData *ItinData, const MachineInstr &MI) const {
4277 // Default to one cycle for no itinerary. However, an "empty" itinerary may
4278 // still have a MinLatency property, which getStageLatency checks.
4279 if (!ItinData)
4280 return getInstrLatency(ItinData, MI);
4281
4282 if (MI.isTransient())
4283 return 0;
4284 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
4285}
4286
4287/// getOperandLatency - Compute and return the use operand latency of a given
4288/// pair of def and use.
4289/// In most cases, the static scheduling itinerary was enough to determine the
4290/// operand latency. But it may not be possible for instructions with variable
4291/// number of defs / uses.
4292///
4293/// This is a raw interface to the itinerary that may be directly overridden by
4294/// a target. Use computeOperandLatency to get the best estimate of latency.
4295int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4296 const MachineInstr &DefMI,
4297 unsigned DefIdx,
4298 const MachineInstr &UseMI,
4299 unsigned UseIdx) const {
4300 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4301
4302 // Get DefIdx and UseIdx for super registers.
4303 const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4304
4305 if (DefMO.isReg() && DefMO.getReg().isPhysical()) {
4306 if (DefMO.isImplicit()) {
4307 for (MCPhysReg SR : HRI.superregs(DefMO.getReg())) {
4308 int Idx = DefMI.findRegisterDefOperandIdx(SR, false, false, &HRI);
4309 if (Idx != -1) {
4310 DefIdx = Idx;
4311 break;
4312 }
4313 }
4314 }
4315
4316 const MachineOperand &UseMO = UseMI.getOperand(UseIdx);
4317 if (UseMO.isImplicit()) {
4318 for (MCPhysReg SR : HRI.superregs(UseMO.getReg())) {
4319 int Idx = UseMI.findRegisterUseOperandIdx(SR, false, &HRI);
4320 if (Idx != -1) {
4321 UseIdx = Idx;
4322 break;
4323 }
4324 }
4325 }
4326 }
4327
4328 int Latency = TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
4329 UseMI, UseIdx);
4330 if (!Latency)
4331 // We should never have 0 cycle latency between two instructions unless
4332 // they can be packetized together. However, this decision can't be made
4333 // here.
4334 Latency = 1;
4335 return Latency;
4336}
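// For example (register names chosen for illustration): if DefMI carries an
// implicit def of r0 while the itinerary-relevant def operand is the
// super-register d0, the superregs() loop above redirects DefIdx to the d0
// operand before the itinerary lookup; the use side is adjusted the same way.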
4337
4338// Inverts the predication logic.
4339// p -> NotP
4340// NotP -> P
4341bool HexagonInstrInfo::getInvertedPredSense(
4342 SmallVectorImpl<MachineOperand> &Cond) const {
4343 if (Cond.empty())
4344 return false;
4345 unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
4346 Cond[0].setImm(Opc);
4347 return true;
4348}
4349
4350unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
4351 int InvPredOpcode;
4352 InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
4353 : Hexagon::getTruePredOpcode(Opc);
4354 if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
4355 return InvPredOpcode;
4356
4357  llvm_unreachable("Unexpected predicated instruction");
4358}
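// For example (opcode pair assumed from the Hexagon instruction tables): a
// predicated-true branch such as J2_jumpt inverts to its predicated-false
// counterpart J2_jumpf, and vice versa; an opcode with no such counterpart
// reaches the llvm_unreachable above.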
4359
4360// Returns the max value that doesn't need to be extended.
4361int HexagonInstrInfo::getMaxValue(const MachineInstr &MI) const {
4362 const uint64_t F = MI.getDesc().TSFlags;
4363 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4364 & HexagonII::ExtentSignedMask;
4365 unsigned bits = (F >> HexagonII::ExtentBitsPos)
4366 & HexagonII::ExtentBitsMask;
4367
4368 if (isSigned) // if value is signed
4369 return ~(-1U << (bits - 1));
4370 else
4371 return ~(-1U << bits);
4372}
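// Worked example (extent width assumed for illustration): with an 8-bit signed
// extent, ~(-1U << 7) yields 0x7f (127); with an 8-bit unsigned extent,
// ~(-1U << 8) yields 0xff (255). Immediates above these bounds need a constant
// extender.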
4373
4374
4375bool HexagonInstrInfo::isAddrModeWithOffset(const MachineInstr &MI) const {
4376 switch (MI.getOpcode()) {
4377 case Hexagon::L2_loadrbgp:
4378 case Hexagon::L2_loadrdgp:
4379 case Hexagon::L2_loadrhgp:
4380 case Hexagon::L2_loadrigp:
4381 case Hexagon::L2_loadrubgp:
4382 case Hexagon::L2_loadruhgp:
4383 case Hexagon::S2_storerbgp:
4384 case Hexagon::S2_storerbnewgp:
4385 case Hexagon::S2_storerhgp:
4386 case Hexagon::S2_storerhnewgp:
4387 case Hexagon::S2_storerigp:
4388 case Hexagon::S2_storerinewgp:
4389 case Hexagon::S2_storerdgp:
4390 case Hexagon::S2_storerfgp:
4391 return true;
4392 }
4393 const uint64_t F = MI.getDesc().TSFlags;
4394 unsigned addrMode =
4395 ((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
4396  // Disallow any base+offset instruction. The assembler does not yet reorder
4397  // based on any zero-offset instruction.
4398 return (addrMode == HexagonII::BaseRegOffset ||
4399 addrMode == HexagonII::BaseImmOffset ||
4400 addrMode == HexagonII::BaseLongOffset);
4401}
4402
4403bool HexagonInstrInfo::isPureSlot0(const MachineInstr &MI) const {
4404 // Workaround for the Global Scheduler. Sometimes, it creates
4405 // A4_ext as a Pseudo instruction and calls this function to see if
4406 // it can be added to an existing bundle. Since the instruction doesn't
4407 // belong to any BB yet, we can't use getUnits API.
4408 if (MI.getOpcode() == Hexagon::A4_ext)
4409 return false;
4410
4411 unsigned FuncUnits = getUnits(MI);
4412 return HexagonFUnits::isSlot0Only(FuncUnits);
4413}
4414
4415bool HexagonInstrInfo::isRestrictNoSlot1Store(const MachineInstr &MI) const {
4416 const uint64_t F = MI.getDesc().TSFlags;
4417 return ((F >> HexagonII::RestrictNoSlot1StorePos) &
4418 HexagonII::RestrictNoSlot1StoreMask);
4419}
4420
4421void HexagonInstrInfo::changeDuplexOpcode(MachineBasicBlock::instr_iterator MII,
4422 bool ToBigInstrs) const {
4423 int Opcode = -1;
4424 if (ToBigInstrs) { // To BigCore Instr.
4425 // Check if the instruction can form a Duplex.
4426 if (getDuplexCandidateGroup(*MII))
4427      // Get the opcode marked with the "dup_*" tag.
4428 Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4429 } else // To TinyCore Instr.
4430 Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4431
4432 // Change the opcode of the instruction.
4433 if (Opcode >= 0)
4434 MII->setDesc(get(Opcode));
4435}
4436
4437// This function is used to translate instructions to facilitate generating
4438// Duplexes on TinyCore.
4439void HexagonInstrInfo::translateInstrsForDup(MachineFunction &MF,
4440 bool ToBigInstrs) const {
4441 for (auto &MB : MF)
4442 for (MachineBasicBlock::instr_iterator Instr = MB.instr_begin(),
4443 End = MB.instr_end();
4444 Instr != End; ++Instr)
4445 changeDuplexOpcode(Instr, ToBigInstrs);
4446}
4447
4448// This is a specialized form of the above function.
4449void HexagonInstrInfo::translateInstrsForDup(
4450 MachineBasicBlock::instr_iterator MII, bool ToBigInstrs) const {
4451 MachineBasicBlock *MBB = MII->getParent();
4452 while ((MII != MBB->instr_end()) && MII->isInsideBundle()) {
4453 changeDuplexOpcode(MII, ToBigInstrs);
4454 ++MII;
4455 }
4456}
4457
4458unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr &MI) const {
4459 using namespace HexagonII;
4460
4461 const uint64_t F = MI.getDesc().TSFlags;
4462 unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
4463 unsigned Size = getMemAccessSizeInBytes(MemAccessSize(S));
4464 if (Size != 0)
4465 return Size;
4466 // Y2_dcfetchbo is special
4467 if (MI.getOpcode() == Hexagon::Y2_dcfetchbo)
4468 return HexagonII::DoubleWordAccess;
4469
4470 // Handle vector access sizes.
4471 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4472 switch (S) {
4473 case HexagonII::HVXVectorAccess:
4474 return HRI.getSpillSize(Hexagon::HvxVRRegClass);
4475 default:
4476    llvm_unreachable("Unexpected instruction");
4477 }
4478}
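// For example (opcode chosen for illustration): a word load such as
// L2_loadri_io reports 4 bytes via its MemAccessSize field, while HVX vector
// memory operations fall through to the HVXVectorAccess case and report the
// HvxVR spill size (64 or 128 bytes, depending on the subtarget's HVX vector
// length).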
4479
4480// Returns the min value that doesn't need to be extended.
4481int HexagonInstrInfo::getMinValue(const MachineInstr &MI) const {
4482 const uint64_t F = MI.getDesc().TSFlags;
4483 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4484 & HexagonII::ExtentSignedMask;
4485 unsigned bits = (F >> HexagonII::ExtentBitsPos)
4486 & HexagonII::ExtentBitsMask;
4487
4488 if (isSigned) // if value is signed
4489 return -1U << (bits - 1);
4490 else
4491 return 0;
4492}
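// Worked example (extent width assumed for illustration): with an 8-bit signed
// extent, -1U << 7 is 0xffffff80, which is -128 when converted to the returned
// int; for any unsigned extent the minimum is simply 0.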
4493
4494// Returns opcode of the non-extended equivalent instruction.
4495short HexagonInstrInfo::getNonExtOpcode(const MachineInstr &MI) const {
4496  // Check if the instruction has a register form that uses a register in place
4497  // of the extended operand; if so, return that as the non-extended form.
4498 short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
4499 if (NonExtOpcode >= 0)
4500 return NonExtOpcode;
4501
4502 if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
4503 // Check addressing mode and retrieve non-ext equivalent instruction.
4504 switch (getAddrMode(MI)) {
4505 case HexagonII::Absolute:
4506 return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
4507 case HexagonII::BaseImmOffset:
4508 return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
4509 case HexagonII::BaseLongOffset:
4510 return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
4511
4512 default:
4513 return -1;
4514 }
4515 }
4516 return -1;
4517}
4518
4519bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
4520 Register &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
4521 if (Cond.empty())
4522 return false;
4523  assert(Cond.size() == 2);
4524 if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
4525    LLVM_DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
4526 return false;
4527 }
4528 PredReg = Cond[1].getReg();
4529 PredRegPos = 1;
4530  // See IfConversion.cpp for why we add RegState::Implicit | RegState::Undef.
4531 PredRegFlags = 0;
4532 if (Cond[1].isImplicit())
4533 PredRegFlags = RegState::Implicit;
4534 if (Cond[1].isUndef())
4535 PredRegFlags |= RegState::Undef;
4536 return true;
4537}
4538
4539short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr &MI) const {
4540 return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
4541}
4542
4543short HexagonInstrInfo::getRegForm(const MachineInstr &MI) const {
4544 return Hexagon::getRegForm(MI.getOpcode());
4545}
4546
4547// Return the number of bytes required to encode the instruction.
4548// Hexagon instructions are fixed length, 4 bytes, unless they
4549// use a constant extender, which requires another 4 bytes.
4550// For debug instructions and prolog labels, return 0.
4551unsigned HexagonInstrInfo::getSize(const MachineInstr &MI) const {
4552 if (MI.isDebugInstr() || MI.isPosition())
4553 return 0;
4554
4555 unsigned Size = MI.getDesc().getSize();
4556 if (!Size)
4557 // Assume the default insn size in case it cannot be determined
4558 // for whatever reason.
4559    Size = HEXAGON_INSTR_SIZE;
4560
4561 if (isConstExtended(MI) || isExtended(MI))
4562    Size += HEXAGON_INSTR_SIZE;
4563
4564  // Try to compute the number of instructions in the inline asm.
4565 if (BranchRelaxAsmLarge && MI.getOpcode() == Hexagon::INLINEASM) {
4566 const MachineBasicBlock &MBB = *MI.getParent();
4567 const MachineFunction *MF = MBB.getParent();
4568 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4569
4570 // Count the number of register definitions to find the asm string.
4571 unsigned NumDefs = 0;
4572 for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
4573 ++NumDefs)
4574      assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");
4575
4576    assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
4577 // Disassemble the AsmStr and approximate number of instructions.
4578 const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
4579 Size = getInlineAsmLength(AsmStr, *MAI);
4580 }
4581
4582 return Size;
4583}
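// For example, a non-extended instruction reports 4 bytes, while one that is
// constant-extended (say an A2_addi whose immediate does not fit the #s16
// extent; opcode chosen for illustration) reports 8 bytes to account for the
// extender word. Inline-asm sizes are instead estimated from the asm string.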
4584
4585uint64_t HexagonInstrInfo::getType(const MachineInstr &MI) const {
4586 const uint64_t F = MI.getDesc().TSFlags;
4587 return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4588}
4589
4590InstrStage::FuncUnits HexagonInstrInfo::getUnits(const MachineInstr &MI) const {
4591 const InstrItineraryData &II = *Subtarget.getInstrItineraryData();
4592 const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
4593
4594 return IS.getUnits();
4595}
4596
4597// Calculate size of the basic block without debug instructions.
4598unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
4599 return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4600}
4601
4602unsigned HexagonInstrInfo::nonDbgBundleSize(
4603 MachineBasicBlock::const_iterator BundleHead) const {
4604  assert(BundleHead->isBundle() && "Not a bundle header");
4605 auto MII = BundleHead.getInstrIterator();
4606 // Skip the bundle header.
4607 return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
4608}
4609
4610/// immediateExtend - Changes the instruction in place to one using an immediate
4611/// extender.
4612void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
4613  assert((isExtendable(MI)||isConstExtended(MI)) &&
4614         "Instruction must be extendable");
4615 // Find which operand is extendable.
4616 short ExtOpNum = getCExtOpNum(MI);
4617 MachineOperand &MO = MI.getOperand(ExtOpNum);
4618 // This needs to be something we understand.
4619  assert((MO.isMBB() || MO.isImm()) &&
4620         "Branch with unknown extendable field type");
4621 // Mark given operand as extended.
4622 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
4623}
4624
4625bool HexagonInstrInfo::invertAndChangeJumpTarget(
4626 MachineInstr &MI, MachineBasicBlock *NewTarget) const {
4627  LLVM_DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
4628                    << printMBBReference(*NewTarget);
4629             MI.dump(););
4630  assert(MI.isBranch());
4631 unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
4632 int TargetPos = MI.getNumOperands() - 1;
4633  // In general, the branch target is the last operand,
4634  // but some implicit defs added at the end might change its position.
4635 while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
4636 --TargetPos;
4637  assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
4638 MI.getOperand(TargetPos).setMBB(NewTarget);
4639 if (EnableBranchPrediction && isPredicatedNew(MI)) {
4640 NewOpcode = reversePrediction(NewOpcode);
4641 }
4642 MI.setDesc(get(NewOpcode));
4643 return true;
4644}
4645
4646void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
4647 /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
4648 MachineFunction::iterator A = MF.begin();
4649 MachineBasicBlock &B = *A;
4650 MachineBasicBlock::iterator I = B.begin();
4651 DebugLoc DL = I->getDebugLoc();
4652 MachineInstr *NewMI;
4653
4654 for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4655 insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4656 NewMI = BuildMI(B, I, DL, get(insn));
4657    LLVM_DEBUG(dbgs() << "\n"
4658               << getName(NewMI->getOpcode())
4659               << " Class: " << NewMI->getDesc().getSchedClass());
4660 NewMI->eraseFromParent();
4661 }
4662 /* --- The code above is used to generate complete set of Hexagon Insn --- */
4663}
4664
4665// Inverts the predication logic.
4666// p -> NotP
4667// NotP -> P
4668bool HexagonInstrInfo::reversePredSense(MachineInstr &MI) const {
4669  LLVM_DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
4670 MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
4671 return true;
4672}
4673
4674// Reverse the branch prediction.
4675unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4676 int PredRevOpcode = -1;
4677 if (isPredictedTaken(Opcode))
4678 PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4679 else
4680 PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4681  assert(PredRevOpcode > 0);
4682 return PredRevOpcode;
4683}
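// For example (opcode names assumed from the Hexagon branch tables): a
// taken-hinted predicated jump such as J2_jumptnewpt maps to its not-taken
// form J2_jumptnew, and reversing again restores the taken hint.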
4684
4685// TODO: Add more rigorous validation.
4686bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
4687 const {
4688 return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4689}
4690
4691void HexagonInstrInfo::
4692setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const {
4693  assert(MIB->isBundle());
4694 MachineOperand &Operand = MIB->getOperand(0);
4695 if (Operand.isImm())
4696 Operand.setImm(Operand.getImm() | memShufDisabledMask);
4697 else
4698 MIB->addOperand(MachineOperand::CreateImm(memShufDisabledMask));
4699}
4700
4701bool HexagonInstrInfo::getBundleNoShuf(const MachineInstr &MIB) const {
4702  assert(MIB.isBundle());
4703 const MachineOperand &Operand = MIB.getOperand(0);
4704 return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);
4705}
4706
4707// Addressing mode relations.
4708short HexagonInstrInfo::changeAddrMode_abs_io(short Opc) const {
4709 return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
4710}
4711
4712short HexagonInstrInfo::changeAddrMode_io_abs(short Opc) const {
4713 return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
4714}
4715
4716short HexagonInstrInfo::changeAddrMode_io_pi(short Opc) const {
4717 return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;
4718}
4719
4720short HexagonInstrInfo::changeAddrMode_io_rr(short Opc) const {
4721 return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
4722}
4723
4724short HexagonInstrInfo::changeAddrMode_pi_io(short Opc) const {
4725 return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;
4726}
4727
4728short HexagonInstrInfo::changeAddrMode_rr_io(short Opc) const {
4729 return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
4730}
4731
4732short HexagonInstrInfo::changeAddrMode_rr_ur(short Opc) const {
4733 return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
4734}
4735
4736short HexagonInstrInfo::changeAddrMode_ur_rr(short Opc) const {
4737 return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
4738}
4739
4740MCInst HexagonInstrInfo::getNop() const {
4741 static const MCInst Nop = MCInstBuilder(Hexagon::A2_nop);
4742
4743 return MCInstBuilder(Hexagon::BUNDLE)
4744 .addImm(0)
4745 .addInst(&Nop);
4746}