Bug Summary

File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
Warning: line 1082, column 43
Called C++ object pointer is null
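
The report flags the handling of Hexagon::PS_call_instrprof_custom in HexagonInstrInfo::expandPostRAPseudo: the handler-name operand is converted with dyn_cast<GlobalVariable>, and the result is dereferenced on the next source line without a null check. dyn_cast<T> returns nullptr when the object is not a T, so the call GV->getInitializer() at line 1082 can be made through a null pointer. A minimal illustration of the checked-cast contract (generic sketch only; hasCStringHandlerName is a hypothetical helper, not code from this file):

#include "llvm/IR/GlobalVariable.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// dyn_cast<> yields nullptr when NameVar is not a GlobalVariable, so the
// result must be tested before any member function is called on it;
// cast<>, by contrast, asserts that the conversion succeeds.
static bool hasCStringHandlerName(const GlobalValue *NameVar) {
  if (const auto *GV = dyn_cast<GlobalVariable>(NameVar))
    return GV->hasInitializer();
  return false; // not a GlobalVariable: nothing to read
}

The annotated path below (steps 1-5) shows how the analyzer reaches the dereference.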

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name HexagonInstrInfo.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/Hexagon -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
1//===- HexagonInstrInfo.cpp - Hexagon Instruction Information -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the Hexagon implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "HexagonInstrInfo.h"
14#include "Hexagon.h"
15#include "HexagonFrameLowering.h"
16#include "HexagonHazardRecognizer.h"
17#include "HexagonRegisterInfo.h"
18#include "HexagonSubtarget.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/CodeGen/DFAPacketizer.h"
24#include "llvm/CodeGen/LivePhysRegs.h"
25#include "llvm/CodeGen/MachineBasicBlock.h"
26#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineFunction.h"
29#include "llvm/CodeGen/MachineInstr.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachineInstrBundle.h"
32#include "llvm/CodeGen/MachineLoopInfo.h"
33#include "llvm/CodeGen/MachineMemOperand.h"
34#include "llvm/CodeGen/MachineOperand.h"
35#include "llvm/CodeGen/MachineRegisterInfo.h"
36#include "llvm/CodeGen/ScheduleDAG.h"
37#include "llvm/CodeGen/TargetInstrInfo.h"
38#include "llvm/CodeGen/TargetOpcodes.h"
39#include "llvm/CodeGen/TargetRegisterInfo.h"
40#include "llvm/CodeGen/TargetSubtargetInfo.h"
41#include "llvm/IR/DebugLoc.h"
42#include "llvm/MC/MCAsmInfo.h"
43#include "llvm/MC/MCInstBuilder.h"
44#include "llvm/MC/MCInstrDesc.h"
45#include "llvm/MC/MCInstrItineraries.h"
46#include "llvm/MC/MCRegisterInfo.h"
47#include "llvm/Support/BranchProbability.h"
48#include "llvm/Support/CommandLine.h"
49#include "llvm/Support/Debug.h"
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/MachineValueType.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Target/TargetMachine.h"
55#include <cassert>
56#include <cctype>
57#include <cstdint>
58#include <cstring>
59#include <iterator>
60#include <string>
61#include <utility>
62
63using namespace llvm;
64
65#define DEBUG_TYPE"hexagon-instrinfo" "hexagon-instrinfo"
66
67#define GET_INSTRINFO_CTOR_DTOR
68#define GET_INSTRMAP_INFO
69#include "HexagonDepTimingClasses.h"
70#include "HexagonGenDFAPacketizer.inc"
71#include "HexagonGenInstrInfo.inc"
72
73cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
74 cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
75 "packetization boundary."));
76
77static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
78 cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
79
80static cl::opt<bool> DisableNVSchedule(
81 "disable-hexagon-nv-schedule", cl::Hidden,
82 cl::desc("Disable schedule adjustment for new value stores."));
83
84static cl::opt<bool> EnableTimingClassLatency(
85 "enable-timing-class-latency", cl::Hidden, cl::init(false),
86 cl::desc("Enable timing class latency"));
87
88static cl::opt<bool> EnableALUForwarding(
89 "enable-alu-forwarding", cl::Hidden, cl::init(true),
90 cl::desc("Enable vec alu forwarding"));
91
92static cl::opt<bool> EnableACCForwarding(
93 "enable-acc-forwarding", cl::Hidden, cl::init(true),
94 cl::desc("Enable vec acc forwarding"));
95
96static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
97 cl::init(true), cl::Hidden,
98 cl::desc("branch relax asm"));
99
100static cl::opt<bool>
101 UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden,
102 cl::desc("Use the DFA based hazard recognizer."));
103
104/// Constants for Hexagon instructions.
105const int Hexagon_MEMW_OFFSET_MAX = 4095;
106const int Hexagon_MEMW_OFFSET_MIN = -4096;
107const int Hexagon_MEMD_OFFSET_MAX = 8191;
108const int Hexagon_MEMD_OFFSET_MIN = -8192;
109const int Hexagon_MEMH_OFFSET_MAX = 2047;
110const int Hexagon_MEMH_OFFSET_MIN = -2048;
111const int Hexagon_MEMB_OFFSET_MAX = 1023;
112const int Hexagon_MEMB_OFFSET_MIN = -1024;
113const int Hexagon_ADDI_OFFSET_MAX = 32767;
114const int Hexagon_ADDI_OFFSET_MIN = -32768;
115
116// Pin the vtable to this file.
117void HexagonInstrInfo::anchor() {}
118
119HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
120 : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
121 Subtarget(ST) {}
122
123namespace llvm {
124namespace HexagonFUnits {
125 bool isSlot0Only(unsigned units);
126}
127}
128
129static bool isIntRegForSubInst(unsigned Reg) {
130 return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
131 (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
132}
133
134static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) {
135 return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
136 isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
137}
138
139/// Calculate number of instructions excluding the debug instructions.
140static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
141 MachineBasicBlock::const_instr_iterator MIE) {
142 unsigned Count = 0;
143 for (; MIB != MIE; ++MIB) {
144 if (!MIB->isDebugInstr())
145 ++Count;
146 }
147 return Count;
148}
149
150// Check if the A2_tfrsi instruction is cheap or not. If the operand has
151// to be constant-extended, it is not cheap since it occupies two slots
152// in a packet.
153bool HexagonInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
154 // Enable the following steps only at Os/Oz
155 if (!(MI.getMF()->getFunction().hasOptSize()))
156 return MI.isAsCheapAsAMove();
157
158 if (MI.getOpcode() == Hexagon::A2_tfrsi) {
159 auto Op = MI.getOperand(1);
160 // If the instruction has a global address as operand, it is not cheap
161 // since the operand will be constant extended.
162 if (Op.isGlobal())
163 return false;
164// If the instruction has an operand of size > 16 bits, it will be
165 // const-extended and hence, it is not cheap.
166 if (Op.isImm()) {
167 int64_t Imm = Op.getImm();
168 if (!isInt<16>(Imm))
169 return false;
170 }
171 }
172 return MI.isAsCheapAsAMove();
173}
174
175// Do not sink floating point instructions that update the USR register.
176// Example:
177// feclearexcept
178// F2_conv_w2sf
179// fetestexcept
180// MachineSink sinks F2_conv_w2sf and we are not able to catch exceptions.
181// TODO: On some of these floating point instructions, USR is marked as Use.
182// In reality, these instructions also Def the USR. If USR is marked as Def,
183// some of the assumptions in assembler packetization are broken.
184bool HexagonInstrInfo::shouldSink(const MachineInstr &MI) const {
185 // Assumption: A floating point instruction that reads the USR will write
186 // the USR as well.
187 if (isFloat(MI) && MI.hasRegisterImplicitUseOperand(Hexagon::USR))
188 return false;
189 return true;
190}
191
192/// Find the hardware loop instruction used to set-up the specified loop.
193/// On Hexagon, we have two instructions used to set-up the hardware loop
194/// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
195/// to indicate the end of a loop.
196MachineInstr *HexagonInstrInfo::findLoopInstr(MachineBasicBlock *BB,
197 unsigned EndLoopOp, MachineBasicBlock *TargetBB,
198 SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
199 unsigned LOOPi;
200 unsigned LOOPr;
201 if (EndLoopOp == Hexagon::ENDLOOP0) {
202 LOOPi = Hexagon::J2_loop0i;
203 LOOPr = Hexagon::J2_loop0r;
204 } else { // EndLoopOp == Hexagon::EndLOOP1
205 LOOPi = Hexagon::J2_loop1i;
206 LOOPr = Hexagon::J2_loop1r;
207 }
208
209 // The loop set-up instruction will be in a predecessor block
210 for (MachineBasicBlock *PB : BB->predecessors()) {
211 // If this has been visited already, skip it.
212 if (!Visited.insert(PB).second)
213 continue;
214 if (PB == BB)
215 continue;
216 for (MachineInstr &I : llvm::reverse(PB->instrs())) {
217 unsigned Opc = I.getOpcode();
218 if (Opc == LOOPi || Opc == LOOPr)
219 return &I;
220 // We've reached a different loop, which means the loop01 has been
221 // removed.
222 if (Opc == EndLoopOp && I.getOperand(0).getMBB() != TargetBB)
223 return nullptr;
224 }
225 // Check the predecessors for the LOOP instruction.
226 if (MachineInstr *Loop = findLoopInstr(PB, EndLoopOp, TargetBB, Visited))
227 return Loop;
228 }
229 return nullptr;
230}
231
232/// Gather register def/uses from MI.
233/// This treats possible (predicated) defs as actually happening ones
234/// (conservatively).
235static inline void parseOperands(const MachineInstr &MI,
236 SmallVector<unsigned, 4> &Defs, SmallVector<unsigned, 8> &Uses) {
237 Defs.clear();
238 Uses.clear();
239
240 for (const MachineOperand &MO : MI.operands()) {
241 if (!MO.isReg())
242 continue;
243
244 Register Reg = MO.getReg();
245 if (!Reg)
246 continue;
247
248 if (MO.isUse())
249 Uses.push_back(MO.getReg());
250
251 if (MO.isDef())
252 Defs.push_back(MO.getReg());
253 }
254}
255
256// Position dependent, so check twice for swap.
257static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
258 switch (Ga) {
259 case HexagonII::HSIG_None:
260 default:
261 return false;
262 case HexagonII::HSIG_L1:
263 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
264 case HexagonII::HSIG_L2:
265 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
266 Gb == HexagonII::HSIG_A);
267 case HexagonII::HSIG_S1:
268 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
269 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
270 case HexagonII::HSIG_S2:
271 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
272 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
273 Gb == HexagonII::HSIG_A);
274 case HexagonII::HSIG_A:
275 return (Gb == HexagonII::HSIG_A);
276 case HexagonII::HSIG_Compound:
277 return (Gb == HexagonII::HSIG_Compound);
278 }
279 return false;
280}
281
282/// isLoadFromStackSlot - If the specified machine instruction is a direct
283/// load from a stack slot, return the virtual or physical register number of
284/// the destination along with the FrameIndex of the loaded stack slot. If
285/// not, return 0. This predicate must return 0 if the instruction has
286/// any side effects other than loading from the stack slot.
287unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
288 int &FrameIndex) const {
289 switch (MI.getOpcode()) {
290 default:
291 break;
292 case Hexagon::L2_loadri_io:
293 case Hexagon::L2_loadrd_io:
294 case Hexagon::V6_vL32b_ai:
295 case Hexagon::V6_vL32b_nt_ai:
296 case Hexagon::V6_vL32Ub_ai:
297 case Hexagon::LDriw_pred:
298 case Hexagon::LDriw_ctr:
299 case Hexagon::PS_vloadrq_ai:
300 case Hexagon::PS_vloadrw_ai:
301 case Hexagon::PS_vloadrw_nt_ai: {
302 const MachineOperand OpFI = MI.getOperand(1);
303 if (!OpFI.isFI())
304 return 0;
305 const MachineOperand OpOff = MI.getOperand(2);
306 if (!OpOff.isImm() || OpOff.getImm() != 0)
307 return 0;
308 FrameIndex = OpFI.getIndex();
309 return MI.getOperand(0).getReg();
310 }
311
312 case Hexagon::L2_ploadrit_io:
313 case Hexagon::L2_ploadrif_io:
314 case Hexagon::L2_ploadrdt_io:
315 case Hexagon::L2_ploadrdf_io: {
316 const MachineOperand OpFI = MI.getOperand(2);
317 if (!OpFI.isFI())
318 return 0;
319 const MachineOperand OpOff = MI.getOperand(3);
320 if (!OpOff.isImm() || OpOff.getImm() != 0)
321 return 0;
322 FrameIndex = OpFI.getIndex();
323 return MI.getOperand(0).getReg();
324 }
325 }
326
327 return 0;
328}
329
330/// isStoreToStackSlot - If the specified machine instruction is a direct
331/// store to a stack slot, return the virtual or physical register number of
332/// the source reg along with the FrameIndex of the stored stack slot. If
333/// not, return 0. This predicate must return 0 if the instruction has
334/// any side effects other than storing to the stack slot.
335unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
336 int &FrameIndex) const {
337 switch (MI.getOpcode()) {
338 default:
339 break;
340 case Hexagon::S2_storerb_io:
341 case Hexagon::S2_storerh_io:
342 case Hexagon::S2_storeri_io:
343 case Hexagon::S2_storerd_io:
344 case Hexagon::V6_vS32b_ai:
345 case Hexagon::V6_vS32Ub_ai:
346 case Hexagon::STriw_pred:
347 case Hexagon::STriw_ctr:
348 case Hexagon::PS_vstorerq_ai:
349 case Hexagon::PS_vstorerw_ai: {
350 const MachineOperand &OpFI = MI.getOperand(0);
351 if (!OpFI.isFI())
352 return 0;
353 const MachineOperand &OpOff = MI.getOperand(1);
354 if (!OpOff.isImm() || OpOff.getImm() != 0)
355 return 0;
356 FrameIndex = OpFI.getIndex();
357 return MI.getOperand(2).getReg();
358 }
359
360 case Hexagon::S2_pstorerbt_io:
361 case Hexagon::S2_pstorerbf_io:
362 case Hexagon::S2_pstorerht_io:
363 case Hexagon::S2_pstorerhf_io:
364 case Hexagon::S2_pstorerit_io:
365 case Hexagon::S2_pstorerif_io:
366 case Hexagon::S2_pstorerdt_io:
367 case Hexagon::S2_pstorerdf_io: {
368 const MachineOperand &OpFI = MI.getOperand(1);
369 if (!OpFI.isFI())
370 return 0;
371 const MachineOperand &OpOff = MI.getOperand(2);
372 if (!OpOff.isImm() || OpOff.getImm() != 0)
373 return 0;
374 FrameIndex = OpFI.getIndex();
375 return MI.getOperand(3).getReg();
376 }
377 }
378
379 return 0;
380}
381
382/// This function checks if the instruction or bundle of instructions
383/// has load from stack slot and returns frameindex and machine memory
384/// operand of that instruction if true.
385bool HexagonInstrInfo::hasLoadFromStackSlot(
386 const MachineInstr &MI,
387 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
388 if (MI.isBundle()) {
389 const MachineBasicBlock *MBB = MI.getParent();
390 MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
391 for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
392 if (TargetInstrInfo::hasLoadFromStackSlot(*MII, Accesses))
393 return true;
394 return false;
395 }
396
397 return TargetInstrInfo::hasLoadFromStackSlot(MI, Accesses);
398}
399
400/// This function checks if the instruction or bundle of instructions
401/// has store to stack slot and returns frameindex and machine memory
402/// operand of that instruction if true.
403bool HexagonInstrInfo::hasStoreToStackSlot(
404 const MachineInstr &MI,
405 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
406 if (MI.isBundle()) {
407 const MachineBasicBlock *MBB = MI.getParent();
408 MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
409 for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
410 if (TargetInstrInfo::hasStoreToStackSlot(*MII, Accesses))
411 return true;
412 return false;
413 }
414
415 return TargetInstrInfo::hasStoreToStackSlot(MI, Accesses);
416}
417
418/// This function can analyze one/two way branching only and should (mostly) be
419/// called by target independent side.
420/// First entry is always the opcode of the branching instruction, except when
421/// the Cond vector is supposed to be empty, e.g., when analyzeBranch fails, a
422/// BB with only unconditional jump. Subsequent entries depend upon the opcode,
423/// e.g. Jump_c p will have
424/// Cond[0] = Jump_c
425/// Cond[1] = p
426/// HW-loop ENDLOOP:
427/// Cond[0] = ENDLOOP
428/// Cond[1] = MBB
429/// New value jump:
430/// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
431/// Cond[1] = R
432/// Cond[2] = Imm
433bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
434 MachineBasicBlock *&TBB,
435 MachineBasicBlock *&FBB,
436 SmallVectorImpl<MachineOperand> &Cond,
437 bool AllowModify) const {
438 TBB = nullptr;
439 FBB = nullptr;
440 Cond.clear();
441
442 // If the block has no terminators, it just falls into the block after it.
443 MachineBasicBlock::instr_iterator I = MBB.instr_end();
444 if (I == MBB.instr_begin())
445 return false;
446
447 // A basic block may look like this:
448 //
449 // [ insn
450 // EH_LABEL
451 // insn
452 // insn
453 // insn
454 // EH_LABEL
455 // insn ]
456 //
457 // It has two succs but does not have a terminator
458 // Don't know how to handle it.
459 do {
460 --I;
461 if (I->isEHLabel())
462 // Don't analyze EH branches.
463 return true;
464 } while (I != MBB.instr_begin());
465
466 I = MBB.instr_end();
467 --I;
468
469 while (I->isDebugInstr()) {
470 if (I == MBB.instr_begin())
471 return false;
472 --I;
473 }
474
475 bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
476 I->getOperand(0).isMBB();
477 // Delete the J2_jump if it's equivalent to a fall-through.
478 if (AllowModify && JumpToBlock &&
479 MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
480 LLVM_DEBUG(dbgs() << "\nErasing the jump to successor block\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nErasing the jump to successor block\n"
;; } } while (false)
;
481 I->eraseFromParent();
482 I = MBB.instr_end();
483 if (I == MBB.instr_begin())
484 return false;
485 --I;
486 }
487 if (!isUnpredicatedTerminator(*I))
488 return false;
489
490 // Get the last instruction in the block.
491 MachineInstr *LastInst = &*I;
492 MachineInstr *SecondLastInst = nullptr;
493 // Find one more terminator if present.
494 while (true) {
495 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
496 if (!SecondLastInst)
497 SecondLastInst = &*I;
498 else
499 // This is a third branch.
500 return true;
501 }
502 if (I == MBB.instr_begin())
503 break;
504 --I;
505 }
506
507 int LastOpcode = LastInst->getOpcode();
508 int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
509 // If the branch target is not a basic block, it could be a tail call.
510 // (It is, if the target is a function.)
511 if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
512 return true;
513 if (SecLastOpcode == Hexagon::J2_jump &&
514 !SecondLastInst->getOperand(0).isMBB())
515 return true;
516
517 bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
518 bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
519
520 if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
521 return true;
522
523 // If there is only one terminator instruction, process it.
524 if (LastInst && !SecondLastInst) {
525 if (LastOpcode == Hexagon::J2_jump) {
526 TBB = LastInst->getOperand(0).getMBB();
527 return false;
528 }
529 if (isEndLoopN(LastOpcode)) {
530 TBB = LastInst->getOperand(0).getMBB();
531 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
532 Cond.push_back(LastInst->getOperand(0));
533 return false;
534 }
535 if (LastOpcodeHasJMP_c) {
536 TBB = LastInst->getOperand(1).getMBB();
537 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
538 Cond.push_back(LastInst->getOperand(0));
539 return false;
540 }
541 // Only supporting rr/ri versions of new-value jumps.
542 if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
543 TBB = LastInst->getOperand(2).getMBB();
544 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
545 Cond.push_back(LastInst->getOperand(0));
546 Cond.push_back(LastInst->getOperand(1));
547 return false;
548 }
549 LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nCant analyze " <<
printMBBReference(MBB) << " with one jump\n";; } } while
(false)
550 << " with one jump\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nCant analyze " <<
printMBBReference(MBB) << " with one jump\n";; } } while
(false)
;
551 // Otherwise, don't know what this is.
552 return true;
553 }
554
555 bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
556 bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
557 if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
558 if (!SecondLastInst->getOperand(1).isMBB())
559 return true;
560 TBB = SecondLastInst->getOperand(1).getMBB();
561 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
562 Cond.push_back(SecondLastInst->getOperand(0));
563 FBB = LastInst->getOperand(0).getMBB();
564 return false;
565 }
566
567 // Only supporting rr/ri versions of new-value jumps.
568 if (SecLastOpcodeHasNVJump &&
569 (SecondLastInst->getNumExplicitOperands() == 3) &&
570 (LastOpcode == Hexagon::J2_jump)) {
571 TBB = SecondLastInst->getOperand(2).getMBB();
572 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
573 Cond.push_back(SecondLastInst->getOperand(0));
574 Cond.push_back(SecondLastInst->getOperand(1));
575 FBB = LastInst->getOperand(0).getMBB();
576 return false;
577 }
578
579 // If the block ends with two Hexagon:JMPs, handle it. The second one is not
580 // executed, so remove it.
581 if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
582 TBB = SecondLastInst->getOperand(0).getMBB();
583 I = LastInst->getIterator();
584 if (AllowModify)
585 I->eraseFromParent();
586 return false;
587 }
588
589 // If the block ends with an ENDLOOP, and J2_jump, handle it.
590 if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
591 TBB = SecondLastInst->getOperand(0).getMBB();
592 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
593 Cond.push_back(SecondLastInst->getOperand(0));
594 FBB = LastInst->getOperand(0).getMBB();
595 return false;
596 }
597 LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nCant analyze " <<
printMBBReference(MBB) << " with two jumps";; } } while
(false)
598 << " with two jumps";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nCant analyze " <<
printMBBReference(MBB) << " with two jumps";; } } while
(false)
;
599 // Otherwise, can't handle this.
600 return true;
601}
602
603unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
604 int *BytesRemoved) const {
605 assert(!BytesRemoved && "code size not handled");
606
607 LLVM_DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nRemoving branches out of "
<< printMBBReference(MBB); } } while (false)
;
608 MachineBasicBlock::iterator I = MBB.end();
609 unsigned Count = 0;
610 while (I != MBB.begin()) {
611 --I;
612 if (I->isDebugInstr())
613 continue;
614 // Only removing branches from end of MBB.
615 if (!I->isBranch())
616 return Count;
617 if (Count && (I->getOpcode() == Hexagon::J2_jump))
618 llvm_unreachable("Malformed basic block: unconditional branch not last")::llvm::llvm_unreachable_internal("Malformed basic block: unconditional branch not last"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 618)
;
619 MBB.erase(&MBB.back());
620 I = MBB.end();
621 ++Count;
622 }
623 return Count;
624}
625
626unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
627 MachineBasicBlock *TBB,
628 MachineBasicBlock *FBB,
629 ArrayRef<MachineOperand> Cond,
630 const DebugLoc &DL,
631 int *BytesAdded) const {
632 unsigned BOpc = Hexagon::J2_jump;
633 unsigned BccOpc = Hexagon::J2_jumpt;
634 assert(validateBranchCond(Cond) && "Invalid branching condition");
635 assert(TBB && "insertBranch must not be told to insert a fallthrough");
636 assert(!BytesAdded && "code size not handled");
637
638 // Check if reverseBranchCondition has asked to reverse this branch
639 // If we want to reverse the branch an odd number of times, we want
640 // J2_jumpf.
641 if (!Cond.empty() && Cond[0].isImm())
642 BccOpc = Cond[0].getImm();
643
644 if (!FBB) {
645 if (Cond.empty()) {
646 // Due to a bug in TailMerging/CFG Optimization, we need to add a
647 // special case handling of a predicated jump followed by an
648 // unconditional jump. If not, Tail Merging and CFG Optimization go
649 // into an infinite loop.
650 MachineBasicBlock *NewTBB, *NewFBB;
651 SmallVector<MachineOperand, 4> Cond;
652 auto Term = MBB.getFirstTerminator();
653 if (Term != MBB.end() && isPredicated(*Term) &&
654 !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
655 MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
656 reverseBranchCondition(Cond);
657 removeBranch(MBB);
658 return insertBranch(MBB, TBB, nullptr, Cond, DL);
659 }
660 BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
661 } else if (isEndLoopN(Cond[0].getImm())) {
662 int EndLoopOp = Cond[0].getImm();
663 assert(Cond[1].isMBB());
664 // Since we're adding an ENDLOOP, there better be a LOOP instruction.
665 // Check for it, and change the BB target if needed.
666 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
667 MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
668 VisitedBBs);
669 assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
670 Loop->getOperand(0).setMBB(TBB);
671 // Add the ENDLOOP after finding the LOOP0.
672 BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
673 } else if (isNewValueJump(Cond[0].getImm())) {
674 assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
675 // New value jump
676 // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
677 // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
678 unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
679 LLVM_DEBUG(dbgs() << "\nInserting NVJump for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nInserting NVJump for "
<< printMBBReference(MBB);; } } while (false)
680 << printMBBReference(MBB);)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("hexagon-instrinfo")) { dbgs() << "\nInserting NVJump for "
<< printMBBReference(MBB);; } } while (false)
;
681 if (Cond[2].isReg()) {
682 unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
683 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
684 addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
685 } else if(Cond[2].isImm()) {
686 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
687 addImm(Cond[2].getImm()).addMBB(TBB);
688 } else
689 llvm_unreachable("Invalid condition for branching")::llvm::llvm_unreachable_internal("Invalid condition for branching"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 689)
;
690 } else {
691 assert((Cond.size() == 2) && "Malformed cond vector");
692 const MachineOperand &RO = Cond[1];
693 unsigned Flags = getUndefRegState(RO.isUndef());
694 BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
695 }
696 return 1;
697 }
698 assert((!Cond.empty()) &&
699 "Cond. cannot be empty when multiple branchings are required");
700 assert((!isNewValueJump(Cond[0].getImm())) &&
701 "NV-jump cannot be inserted with another branch");
702 // Special case for hardware loops. The condition is a basic block.
703 if (isEndLoopN(Cond[0].getImm())) {
704 int EndLoopOp = Cond[0].getImm();
705 assert(Cond[1].isMBB());
706 // Since we're adding an ENDLOOP, there better be a LOOP instruction.
707 // Check for it, and change the BB target if needed.
708 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
709 MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
710 VisitedBBs);
711 assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
712 Loop->getOperand(0).setMBB(TBB);
713 // Add the ENDLOOP after finding the LOOP0.
714 BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
715 } else {
716 const MachineOperand &RO = Cond[1];
717 unsigned Flags = getUndefRegState(RO.isUndef());
718 BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
719 }
720 BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
721
722 return 2;
723}
724
725namespace {
726class HexagonPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
727 MachineInstr *Loop, *EndLoop;
728 MachineFunction *MF;
729 const HexagonInstrInfo *TII;
730 int64_t TripCount;
731 Register LoopCount;
732 DebugLoc DL;
733
734public:
735 HexagonPipelinerLoopInfo(MachineInstr *Loop, MachineInstr *EndLoop)
736 : Loop(Loop), EndLoop(EndLoop), MF(Loop->getParent()->getParent()),
737 TII(MF->getSubtarget<HexagonSubtarget>().getInstrInfo()),
738 DL(Loop->getDebugLoc()) {
739 // Inspect the Loop instruction up-front, as it may be deleted when we call
740 // createTripCountGreaterCondition.
741 TripCount = Loop->getOpcode() == Hexagon::J2_loop0r
742 ? -1
743 : Loop->getOperand(1).getImm();
744 if (TripCount == -1)
745 LoopCount = Loop->getOperand(1).getReg();
746 }
747
748 bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
749 // Only ignore the terminator.
750 return MI == EndLoop;
751 }
752
753 Optional<bool>
754 createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
755 SmallVectorImpl<MachineOperand> &Cond) override {
756 if (TripCount == -1) {
757 // Check if we're done with the loop.
758 unsigned Done = TII->createVR(MF, MVT::i1);
759 MachineInstr *NewCmp = BuildMI(&MBB, DL,
760 TII->get(Hexagon::C2_cmpgtui), Done)
761 .addReg(LoopCount)
762 .addImm(TC);
763 Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
764 Cond.push_back(NewCmp->getOperand(0));
765 return {};
766 }
767
768 return TripCount > TC;
769 }
770
771 void setPreheader(MachineBasicBlock *NewPreheader) override {
772 NewPreheader->splice(NewPreheader->getFirstTerminator(), Loop->getParent(),
773 Loop);
774 }
775
776 void adjustTripCount(int TripCountAdjust) override {
777 // If the loop trip count is a compile-time value, then just change the
778 // value.
779 if (Loop->getOpcode() == Hexagon::J2_loop0i ||
780 Loop->getOpcode() == Hexagon::J2_loop1i) {
781 int64_t TripCount = Loop->getOperand(1).getImm() + TripCountAdjust;
782 assert(TripCount > 0 && "Can't create an empty or negative loop!");
783 Loop->getOperand(1).setImm(TripCount);
784 return;
785 }
786
787 // The loop trip count is a run-time value. We generate code to subtract
788 // one from the trip count, and update the loop instruction.
789 Register LoopCount = Loop->getOperand(1).getReg();
790 Register NewLoopCount = TII->createVR(MF, MVT::i32);
791 BuildMI(*Loop->getParent(), Loop, Loop->getDebugLoc(),
792 TII->get(Hexagon::A2_addi), NewLoopCount)
793 .addReg(LoopCount)
794 .addImm(TripCountAdjust);
795 Loop->getOperand(1).setReg(NewLoopCount);
796 }
797
798 void disposed() override { Loop->eraseFromParent(); }
799};
800} // namespace
801
802std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
803HexagonInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
804 // We really "analyze" only hardware loops right now.
805 MachineBasicBlock::iterator I = LoopBB->getFirstTerminator();
806
807 if (I != LoopBB->end() && isEndLoopN(I->getOpcode())) {
808 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
809 MachineInstr *LoopInst = findLoopInstr(
810 LoopBB, I->getOpcode(), I->getOperand(0).getMBB(), VisitedBBs);
811 if (LoopInst)
812 return std::make_unique<HexagonPipelinerLoopInfo>(LoopInst, &*I);
813 }
814 return nullptr;
815}
816
817bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
818 unsigned NumCycles, unsigned ExtraPredCycles,
819 BranchProbability Probability) const {
820 return nonDbgBBSize(&MBB) <= 3;
821}
822
823bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
824 unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
825 unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
826 const {
827 return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
828}
829
830bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
831 unsigned NumInstrs, BranchProbability Probability) const {
832 return NumInstrs <= 4;
833}
834
835static void getLiveInRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
836 SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
837 const MachineBasicBlock &B = *MI.getParent();
838 Regs.addLiveIns(B);
839 auto E = MachineBasicBlock::const_iterator(MI.getIterator());
840 for (auto I = B.begin(); I != E; ++I) {
841 Clobbers.clear();
842 Regs.stepForward(*I, Clobbers);
843 }
844}
845
846static void getLiveOutRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
847 const MachineBasicBlock &B = *MI.getParent();
848 Regs.addLiveOuts(B);
849 auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
850 for (auto I = B.rbegin(); I != E; ++I)
851 Regs.stepBackward(*I);
852}
853
854void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
855 MachineBasicBlock::iterator I,
856 const DebugLoc &DL, MCRegister DestReg,
857 MCRegister SrcReg, bool KillSrc) const {
858 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
859 unsigned KillFlag = getKillRegState(KillSrc);
860
861 if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
862 BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
863 .addReg(SrcReg, KillFlag);
864 return;
865 }
866 if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
867 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
868 .addReg(SrcReg, KillFlag);
869 return;
870 }
871 if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
872 // Map Pd = Ps to Pd = or(Ps, Ps).
873 BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
874 .addReg(SrcReg).addReg(SrcReg, KillFlag);
875 return;
876 }
877 if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
878 Hexagon::IntRegsRegClass.contains(SrcReg)) {
879 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
880 .addReg(SrcReg, KillFlag);
881 return;
882 }
883 if (Hexagon::IntRegsRegClass.contains(DestReg) &&
884 Hexagon::CtrRegsRegClass.contains(SrcReg)) {
885 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
886 .addReg(SrcReg, KillFlag);
887 return;
888 }
889 if (Hexagon::ModRegsRegClass.contains(DestReg) &&
890 Hexagon::IntRegsRegClass.contains(SrcReg)) {
891 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
892 .addReg(SrcReg, KillFlag);
893 return;
894 }
895 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
896 Hexagon::IntRegsRegClass.contains(DestReg)) {
897 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
898 .addReg(SrcReg, KillFlag);
899 return;
900 }
901 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
902 Hexagon::PredRegsRegClass.contains(DestReg)) {
903 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
904 .addReg(SrcReg, KillFlag);
905 return;
906 }
907 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
908 Hexagon::IntRegsRegClass.contains(DestReg)) {
909 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
910 .addReg(SrcReg, KillFlag);
911 return;
912 }
913 if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
914 BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
915 addReg(SrcReg, KillFlag);
916 return;
917 }
918 if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
919 LivePhysRegs LiveAtMI(HRI);
920 getLiveInRegsAt(LiveAtMI, *I);
921 Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
922 Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
923 unsigned UndefLo = getUndefRegState(!LiveAtMI.contains(SrcLo));
924 unsigned UndefHi = getUndefRegState(!LiveAtMI.contains(SrcHi));
925 BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
926 .addReg(SrcHi, KillFlag | UndefHi)
927 .addReg(SrcLo, KillFlag | UndefLo);
928 return;
929 }
930 if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
931 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
932 .addReg(SrcReg)
933 .addReg(SrcReg, KillFlag);
934 return;
935 }
936 if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
937 Hexagon::HvxVRRegClass.contains(DestReg)) {
938 llvm_unreachable("Unimplemented pred to vec")::llvm::llvm_unreachable_internal("Unimplemented pred to vec"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 938)
;
939 return;
940 }
941 if (Hexagon::HvxQRRegClass.contains(DestReg) &&
942 Hexagon::HvxVRRegClass.contains(SrcReg)) {
943 llvm_unreachable("Unimplemented vec to pred")::llvm::llvm_unreachable_internal("Unimplemented vec to pred"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 943)
;
944 return;
945 }
946
947#ifndef NDEBUG
948 // Show the invalid registers to ease debugging.
949 dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": "
950 << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n';
951#endif
952 llvm_unreachable("Unimplemented")::llvm::llvm_unreachable_internal("Unimplemented", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 952)
;
953}
954
955void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
956 MachineBasicBlock::iterator I, Register SrcReg, bool isKill, int FI,
957 const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
958 DebugLoc DL = MBB.findDebugLoc(I);
959 MachineFunction &MF = *MBB.getParent();
960 MachineFrameInfo &MFI = MF.getFrameInfo();
961 unsigned KillFlag = getKillRegState(isKill);
962
963 MachineMemOperand *MMO = MF.getMachineMemOperand(
964 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
965 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
966
967 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
968 BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
969 .addFrameIndex(FI).addImm(0)
970 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
971 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
972 BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
973 .addFrameIndex(FI).addImm(0)
974 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
975 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
976 BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
977 .addFrameIndex(FI).addImm(0)
978 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
979 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
980 BuildMI(MBB, I, DL, get(Hexagon::STriw_ctr))
981 .addFrameIndex(FI).addImm(0)
982 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
983 } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
984 BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
985 .addFrameIndex(FI).addImm(0)
986 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
987 } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
988 BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerv_ai))
989 .addFrameIndex(FI).addImm(0)
990 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
991 } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
992 BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerw_ai))
993 .addFrameIndex(FI).addImm(0)
994 .addReg(SrcReg, KillFlag).addMemOperand(MMO);
995 } else {
996 llvm_unreachable("Unimplemented")::llvm::llvm_unreachable_internal("Unimplemented", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 996)
;
997 }
998}
999
1000void HexagonInstrInfo::loadRegFromStackSlot(
1001 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
1002 int FI, const TargetRegisterClass *RC,
1003 const TargetRegisterInfo *TRI) const {
1004 DebugLoc DL = MBB.findDebugLoc(I);
1005 MachineFunction &MF = *MBB.getParent();
1006 MachineFrameInfo &MFI = MF.getFrameInfo();
1007
1008 MachineMemOperand *MMO = MF.getMachineMemOperand(
1009 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1010 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
1011
1012 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
1013 BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
1014 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1015 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
1016 BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
1017 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1018 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
1019 BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
1020 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1021 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
1022 BuildMI(MBB, I, DL, get(Hexagon::LDriw_ctr), DestReg)
1023 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1024 } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
1025 BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
1026 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1027 } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
1028 BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrv_ai), DestReg)
1029 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1030 } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1031 BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrw_ai), DestReg)
1032 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1033 } else {
1034 llvm_unreachable("Can't store this register to stack slot")::llvm::llvm_unreachable_internal("Can't store this register to stack slot"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 1034)
;
1035 }
1036}
1037
1038/// expandPostRAPseudo - This function is called for all pseudo instructions
1039/// that remain after register allocation. Many pseudo instructions are
1040/// created to help register allocation. This is the place to convert them
1041/// into real instructions. The target can edit MI in place, or it can insert
1042/// new instructions and erase MI. The function should return true if
1043/// anything was changed.
1044bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1045 MachineBasicBlock &MBB = *MI.getParent();
1046 MachineFunction &MF = *MBB.getParent();
1047 MachineRegisterInfo &MRI = MF.getRegInfo();
1048 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1049 LivePhysRegs LiveIn(HRI), LiveOut(HRI);
1050 DebugLoc DL = MI.getDebugLoc();
1051 unsigned Opc = MI.getOpcode();
1052
1053 auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {
1054 Register Mx = MI.getOperand(MxOp).getReg();
1055 unsigned CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
1056 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx)
1057 .add(MI.getOperand((HasImm ? 5 : 4)));
1058 auto MIB = BuildMI(MBB, MI, DL, get(Opc)).add(MI.getOperand(0))
1059 .add(MI.getOperand(1)).add(MI.getOperand(2)).add(MI.getOperand(3));
1060 if (HasImm)
1061 MIB.add(MI.getOperand(4));
1062 MIB.addReg(CSx, RegState::Implicit);
1063 MBB.erase(MI);
1064 return true;
1065 };
1066
1067 auto UseAligned = [&](const MachineInstr &MI, Align NeedAlign) {
1068 if (MI.memoperands().empty())
1069 return false;
1070 return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
1071 return MMO->getAlign() >= NeedAlign;
1072 });
1073 };
1074
1075 switch (Opc) {
1
Control jumps to 'case PS_call_instrprof_custom:' at line 1076
1076 case Hexagon::PS_call_instrprof_custom: {
1077 auto Op0 = MI.getOperand(0);
1078 assert(Op0.isGlobal() &&
1079 "First operand must be a global containing handler name.");
2
'?' condition is true
1080 const GlobalValue *NameVar = Op0.getGlobal();
1081 const GlobalVariable *GV = dyn_cast<GlobalVariable>(NameVar);
3
Assuming 'NameVar' is not a 'CastReturnType'
4
'GV' initialized to a null pointer value
1082 auto *Arr = cast<ConstantDataArray>(GV->getInitializer());
5
Called C++ object pointer is null
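
One way the dereference reported here could be avoided (a sketch only, assuming it is acceptable to abort when the name operand is not a defined GlobalVariable; the actual upstream fix may differ) is to test the dyn_cast result before reading the initializer:

// Guarded version of lines 1081-1082: check the dyn_cast result (and that an
// initializer is present) before calling getInitializer(). report_fatal_error
// comes from llvm/Support/ErrorHandling.h, which is already included above;
// the error message text is illustrative.
const GlobalVariable *GV = dyn_cast<GlobalVariable>(NameVar);
if (!GV || !GV->hasInitializer())
  report_fatal_error("PS_call_instrprof_custom: handler name must be a "
                     "defined global variable");
auto *Arr = cast<ConstantDataArray>(GV->getInitializer());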
1083 StringRef NameStr = Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
1084
1085 MachineOperand &Op1 = MI.getOperand(1);
1086 // Set R0 with the imm value to be passed to the custom profiling handler.
1087 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrsi), Hexagon::R0)
1088 .addImm(Op1.getImm());
1089 // The call to the custom handler is being treated as a special one as the
1090 // callee is responsible for saving and restoring all the registers
1091 // (including caller saved registers) it needs to modify. This is
1092 // done to reduce the impact of instrumentation on the code being
1093 // instrumented/profiled.
1094 // NOTE: R14, R15 and R28 are reserved for PLT handling. These registers
1095 // are in the Def list of the Hexagon::PS_call_instrprof_custom and
1096 // therefore will be handled appropriately during register allocation.
1097
1098 // TODO: It may be a good idea to add a separate pseudo instruction for
1099 // static relocation which doesn't need to reserve r14, r15 and r28.
1100
1101 auto MIB = BuildMI(MBB, MI, DL, get(Hexagon::J2_call))
1102 .addUse(Hexagon::R0, RegState::Implicit|RegState::InternalRead)
1103 .addDef(Hexagon::R29, RegState::ImplicitDefine)
1104 .addDef(Hexagon::R30, RegState::ImplicitDefine)
1105 .addDef(Hexagon::R14, RegState::ImplicitDefine)
1106 .addDef(Hexagon::R15, RegState::ImplicitDefine)
1107 .addDef(Hexagon::R28, RegState::ImplicitDefine);
1108 const char *cstr = MF.createExternalSymbolName(NameStr);
1109 MIB.addExternalSymbol(cstr);
1110 MBB.erase(MI);
1111 return true;
1112 }
1113 case TargetOpcode::COPY: {
1114 MachineOperand &MD = MI.getOperand(0);
1115 MachineOperand &MS = MI.getOperand(1);
1116 MachineBasicBlock::iterator MBBI = MI.getIterator();
1117 if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
1118 copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
1119 std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
1120 }
1121 MBB.erase(MBBI);
1122 return true;
1123 }
1124 case Hexagon::PS_aligna:
1125 BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
1126 .addReg(HRI.getFrameRegister())
1127 .addImm(-MI.getOperand(1).getImm());
1128 MBB.erase(MI);
1129 return true;
1130 case Hexagon::V6_vassignp: {
1131 Register SrcReg = MI.getOperand(1).getReg();
1132 Register DstReg = MI.getOperand(0).getReg();
1133 Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1134 Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1135 getLiveInRegsAt(LiveIn, MI);
1136 unsigned UndefLo = getUndefRegState(!LiveIn.contains(SrcLo));
1137 unsigned UndefHi = getUndefRegState(!LiveIn.contains(SrcHi));
1138 unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1139 BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1140 .addReg(SrcHi, UndefHi)
1141 .addReg(SrcLo, Kill | UndefLo);
1142 MBB.erase(MI);
1143 return true;
1144 }
1145 case Hexagon::V6_lo: {
1146 Register SrcReg = MI.getOperand(1).getReg();
1147 Register DstReg = MI.getOperand(0).getReg();
1148 Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1149 copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1150 MBB.erase(MI);
1151 MRI.clearKillFlags(SrcSubLo);
1152 return true;
1153 }
1154 case Hexagon::V6_hi: {
1155 Register SrcReg = MI.getOperand(1).getReg();
1156 Register DstReg = MI.getOperand(0).getReg();
1157 Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1158 copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1159 MBB.erase(MI);
1160 MRI.clearKillFlags(SrcSubHi);
1161 return true;
1162 }
1163 case Hexagon::PS_vloadrv_ai: {
1164 Register DstReg = MI.getOperand(0).getReg();
1165 const MachineOperand &BaseOp = MI.getOperand(1);
1166 assert(BaseOp.getSubReg() == 0);
1167 int Offset = MI.getOperand(2).getImm();
1168 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1169 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1170 : Hexagon::V6_vL32Ub_ai;
1171 BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
1172 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1173 .addImm(Offset)
1174 .cloneMemRefs(MI);
1175 MBB.erase(MI);
1176 return true;
1177 }
1178 case Hexagon::PS_vloadrw_ai: {
1179 Register DstReg = MI.getOperand(0).getReg();
1180 const MachineOperand &BaseOp = MI.getOperand(1);
1181 assert(BaseOp.getSubReg() == 0);
1182 int Offset = MI.getOperand(2).getImm();
1183 unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1184 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1185 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1186 : Hexagon::V6_vL32Ub_ai;
1187 BuildMI(MBB, MI, DL, get(NewOpc),
1188 HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1189 .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1190 .addImm(Offset)
1191 .cloneMemRefs(MI);
1192 BuildMI(MBB, MI, DL, get(NewOpc),
1193 HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1194 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1195 .addImm(Offset + VecOffset)
1196 .cloneMemRefs(MI);
1197 MBB.erase(MI);
1198 return true;
1199 }
1200 case Hexagon::PS_vstorerv_ai: {
1201 const MachineOperand &SrcOp = MI.getOperand(2);
1202 assert(SrcOp.getSubReg() == 0);
1203 const MachineOperand &BaseOp = MI.getOperand(0);
1204 assert(BaseOp.getSubReg() == 0);
1205 int Offset = MI.getOperand(1).getImm();
1206 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1207 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1208 : Hexagon::V6_vS32Ub_ai;
1209 BuildMI(MBB, MI, DL, get(NewOpc))
1210 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1211 .addImm(Offset)
1212 .addReg(SrcOp.getReg(), getRegState(SrcOp))
1213 .cloneMemRefs(MI);
1214 MBB.erase(MI);
1215 return true;
1216 }
1217 case Hexagon::PS_vstorerw_ai: {
1218 Register SrcReg = MI.getOperand(2).getReg();
1219 const MachineOperand &BaseOp = MI.getOperand(0);
1220 assert(BaseOp.getSubReg() == 0);
1221 int Offset = MI.getOperand(1).getImm();
1222 unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1223 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1224 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1225 : Hexagon::V6_vS32Ub_ai;
1226 BuildMI(MBB, MI, DL, get(NewOpc))
1227 .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1228 .addImm(Offset)
1229 .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo))
1230 .cloneMemRefs(MI);
1231 BuildMI(MBB, MI, DL, get(NewOpc))
1232 .addReg(BaseOp.getReg(), getRegState(BaseOp))
1233 .addImm(Offset + VecOffset)
1234 .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi))
1235 .cloneMemRefs(MI);
1236 MBB.erase(MI);
1237 return true;
1238 }
1239 case Hexagon::PS_true: {
1240 Register Reg = MI.getOperand(0).getReg();
1241 BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1242 .addReg(Reg, RegState::Undef)
1243 .addReg(Reg, RegState::Undef);
1244 MBB.erase(MI);
1245 return true;
1246 }
1247 case Hexagon::PS_false: {
1248 Register Reg = MI.getOperand(0).getReg();
1249 BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1250 .addReg(Reg, RegState::Undef)
1251 .addReg(Reg, RegState::Undef);
1252 MBB.erase(MI);
1253 return true;
1254 }
1255 case Hexagon::PS_qtrue: {
1256 BuildMI(MBB, MI, DL, get(Hexagon::V6_veqw), MI.getOperand(0).getReg())
1257 .addReg(Hexagon::V0, RegState::Undef)
1258 .addReg(Hexagon::V0, RegState::Undef);
1259 MBB.erase(MI);
1260 return true;
1261 }
1262 case Hexagon::PS_qfalse: {
1263 BuildMI(MBB, MI, DL, get(Hexagon::V6_vgtw), MI.getOperand(0).getReg())
1264 .addReg(Hexagon::V0, RegState::Undef)
1265 .addReg(Hexagon::V0, RegState::Undef);
1266 MBB.erase(MI);
1267 return true;
1268 }
1269 case Hexagon::PS_vdd0: {
1270 Register Vd = MI.getOperand(0).getReg();
1271 BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd)
1272 .addReg(Vd, RegState::Undef)
1273 .addReg(Vd, RegState::Undef);
1274 MBB.erase(MI);
1275 return true;
1276 }
1277 case Hexagon::PS_vmulw: {
1278 // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1279 Register DstReg = MI.getOperand(0).getReg();
1280 Register Src1Reg = MI.getOperand(1).getReg();
1281 Register Src2Reg = MI.getOperand(2).getReg();
1282 Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1283 Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1284 Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1285 Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1286 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1287 HRI.getSubReg(DstReg, Hexagon::isub_hi))
1288 .addReg(Src1SubHi)
1289 .addReg(Src2SubHi);
1290 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1291 HRI.getSubReg(DstReg, Hexagon::isub_lo))
1292 .addReg(Src1SubLo)
1293 .addReg(Src2SubLo);
1294 MBB.erase(MI);
1295 MRI.clearKillFlags(Src1SubHi);
1296 MRI.clearKillFlags(Src1SubLo);
1297 MRI.clearKillFlags(Src2SubHi);
1298 MRI.clearKillFlags(Src2SubLo);
1299 return true;
1300 }
1301 case Hexagon::PS_vmulw_acc: {
1302 // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1303 Register DstReg = MI.getOperand(0).getReg();
1304 Register Src1Reg = MI.getOperand(1).getReg();
1305 Register Src2Reg = MI.getOperand(2).getReg();
1306 Register Src3Reg = MI.getOperand(3).getReg();
1307 Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1308 Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1309 Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1310 Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1311 Register Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1312 Register Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1313 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1314 HRI.getSubReg(DstReg, Hexagon::isub_hi))
1315 .addReg(Src1SubHi)
1316 .addReg(Src2SubHi)
1317 .addReg(Src3SubHi);
1318 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1319 HRI.getSubReg(DstReg, Hexagon::isub_lo))
1320 .addReg(Src1SubLo)
1321 .addReg(Src2SubLo)
1322 .addReg(Src3SubLo);
1323 MBB.erase(MI);
1324 MRI.clearKillFlags(Src1SubHi);
1325 MRI.clearKillFlags(Src1SubLo);
1326 MRI.clearKillFlags(Src2SubHi);
1327 MRI.clearKillFlags(Src2SubLo);
1328 MRI.clearKillFlags(Src3SubHi);
1329 MRI.clearKillFlags(Src3SubLo);
1330 return true;
1331 }
1332 case Hexagon::PS_pselect: {
1333 const MachineOperand &Op0 = MI.getOperand(0);
1334 const MachineOperand &Op1 = MI.getOperand(1);
1335 const MachineOperand &Op2 = MI.getOperand(2);
1336 const MachineOperand &Op3 = MI.getOperand(3);
1337 Register Rd = Op0.getReg();
1338 Register Pu = Op1.getReg();
1339 Register Rs = Op2.getReg();
1340 Register Rt = Op3.getReg();
1341 DebugLoc DL = MI.getDebugLoc();
1342 unsigned K1 = getKillRegState(Op1.isKill());
1343 unsigned K2 = getKillRegState(Op2.isKill());
1344 unsigned K3 = getKillRegState(Op3.isKill());
1345 if (Rd != Rs)
1346 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1347 .addReg(Pu, (Rd == Rt) ? K1 : 0)
1348 .addReg(Rs, K2);
1349 if (Rd != Rt)
1350 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1351 .addReg(Pu, K1)
1352 .addReg(Rt, K3);
1353 MBB.erase(MI);
1354 return true;
1355 }
1356 case Hexagon::PS_vselect: {
1357 const MachineOperand &Op0 = MI.getOperand(0);
1358 const MachineOperand &Op1 = MI.getOperand(1);
1359 const MachineOperand &Op2 = MI.getOperand(2);
1360 const MachineOperand &Op3 = MI.getOperand(3);
1361 getLiveOutRegsAt(LiveOut, MI);
1362 bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1363 Register PReg = Op1.getReg();
1364 assert(Op1.getSubReg() == 0);
1365 unsigned PState = getRegState(Op1);
1366
1367 if (Op0.getReg() != Op2.getReg()) {
1368 unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1369 : PState;
1370 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1371 .add(Op0)
1372 .addReg(PReg, S)
1373 .add(Op2);
1374 if (IsDestLive)
1375 T.addReg(Op0.getReg(), RegState::Implicit);
1376 IsDestLive = true;
1377 }
1378 if (Op0.getReg() != Op3.getReg()) {
1379 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1380 .add(Op0)
1381 .addReg(PReg, PState)
1382 .add(Op3);
1383 if (IsDestLive)
1384 T.addReg(Op0.getReg(), RegState::Implicit);
1385 }
1386 MBB.erase(MI);
1387 return true;
1388 }
1389 case Hexagon::PS_wselect: {
1390 MachineOperand &Op0 = MI.getOperand(0);
1391 MachineOperand &Op1 = MI.getOperand(1);
1392 MachineOperand &Op2 = MI.getOperand(2);
1393 MachineOperand &Op3 = MI.getOperand(3);
1394 getLiveOutRegsAt(LiveOut, MI);
1395 bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1396 Register PReg = Op1.getReg();
1397 assert(Op1.getSubReg() == 0);
1398 unsigned PState = getRegState(Op1);
1399
1400 if (Op0.getReg() != Op2.getReg()) {
1401 unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1402 : PState;
1403 Register SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1404 Register SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1405 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1406 .add(Op0)
1407 .addReg(PReg, S)
1408 .addReg(SrcHi)
1409 .addReg(SrcLo);
1410 if (IsDestLive)
1411 T.addReg(Op0.getReg(), RegState::Implicit);
1412 IsDestLive = true;
1413 }
1414 if (Op0.getReg() != Op3.getReg()) {
1415 Register SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1416 Register SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1417 auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1418 .add(Op0)
1419 .addReg(PReg, PState)
1420 .addReg(SrcHi)
1421 .addReg(SrcLo);
1422 if (IsDestLive)
1423 T.addReg(Op0.getReg(), RegState::Implicit);
1424 }
1425 MBB.erase(MI);
1426 return true;
1427 }
1428
1429 case Hexagon::PS_crash: {
1430 // Generate a misaligned load that is guaranteed to cause a crash.
1431 class CrashPseudoSourceValue : public PseudoSourceValue {
1432 public:
1433 CrashPseudoSourceValue(const TargetMachine &TM)
1434 : PseudoSourceValue(TargetCustom, TM) {}
1435
1436 bool isConstant(const MachineFrameInfo *) const override {
1437 return false;
1438 }
1439 bool isAliased(const MachineFrameInfo *) const override {
1440 return false;
1441 }
1442 bool mayAlias(const MachineFrameInfo *) const override {
1443 return false;
1444 }
1445 void printCustom(raw_ostream &OS) const override {
1446 OS << "MisalignedCrash";
1447 }
1448 };
1449
1450 static const CrashPseudoSourceValue CrashPSV(MF.getTarget());
1451 MachineMemOperand *MMO = MF.getMachineMemOperand(
1452 MachinePointerInfo(&CrashPSV),
1453 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8,
1454 Align(1));
1455 BuildMI(MBB, MI, DL, get(Hexagon::PS_loadrdabs), Hexagon::D13)
1456 .addImm(0xBADC0FEE) // Misaligned load.
1457 .addMemOperand(MMO);
1458 MBB.erase(MI);
1459 return true;
1460 }
1461
1462 case Hexagon::PS_tailcall_i:
1463 MI.setDesc(get(Hexagon::J2_jump));
1464 return true;
1465 case Hexagon::PS_tailcall_r:
1466 case Hexagon::PS_jmpret:
1467 MI.setDesc(get(Hexagon::J2_jumpr));
1468 return true;
1469 case Hexagon::PS_jmprett:
1470 MI.setDesc(get(Hexagon::J2_jumprt));
1471 return true;
1472 case Hexagon::PS_jmpretf:
1473 MI.setDesc(get(Hexagon::J2_jumprf));
1474 return true;
1475 case Hexagon::PS_jmprettnewpt:
1476 MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1477 return true;
1478 case Hexagon::PS_jmpretfnewpt:
1479 MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1480 return true;
1481 case Hexagon::PS_jmprettnew:
1482 MI.setDesc(get(Hexagon::J2_jumprtnew));
1483 return true;
1484 case Hexagon::PS_jmpretfnew:
1485 MI.setDesc(get(Hexagon::J2_jumprfnew));
1486 return true;
1487
1488 case Hexagon::PS_loadrub_pci:
1489 return RealCirc(Hexagon::L2_loadrub_pci, /*HasImm*/true, /*MxOp*/4);
1490 case Hexagon::PS_loadrb_pci:
1491 return RealCirc(Hexagon::L2_loadrb_pci, /*HasImm*/true, /*MxOp*/4);
1492 case Hexagon::PS_loadruh_pci:
1493 return RealCirc(Hexagon::L2_loadruh_pci, /*HasImm*/true, /*MxOp*/4);
1494 case Hexagon::PS_loadrh_pci:
1495 return RealCirc(Hexagon::L2_loadrh_pci, /*HasImm*/true, /*MxOp*/4);
1496 case Hexagon::PS_loadri_pci:
1497 return RealCirc(Hexagon::L2_loadri_pci, /*HasImm*/true, /*MxOp*/4);
1498 case Hexagon::PS_loadrd_pci:
1499 return RealCirc(Hexagon::L2_loadrd_pci, /*HasImm*/true, /*MxOp*/4);
1500 case Hexagon::PS_loadrub_pcr:
1501 return RealCirc(Hexagon::L2_loadrub_pcr, /*HasImm*/false, /*MxOp*/3);
1502 case Hexagon::PS_loadrb_pcr:
1503 return RealCirc(Hexagon::L2_loadrb_pcr, /*HasImm*/false, /*MxOp*/3);
1504 case Hexagon::PS_loadruh_pcr:
1505 return RealCirc(Hexagon::L2_loadruh_pcr, /*HasImm*/false, /*MxOp*/3);
1506 case Hexagon::PS_loadrh_pcr:
1507 return RealCirc(Hexagon::L2_loadrh_pcr, /*HasImm*/false, /*MxOp*/3);
1508 case Hexagon::PS_loadri_pcr:
1509 return RealCirc(Hexagon::L2_loadri_pcr, /*HasImm*/false, /*MxOp*/3);
1510 case Hexagon::PS_loadrd_pcr:
1511 return RealCirc(Hexagon::L2_loadrd_pcr, /*HasImm*/false, /*MxOp*/3);
1512 case Hexagon::PS_storerb_pci:
1513 return RealCirc(Hexagon::S2_storerb_pci, /*HasImm*/true, /*MxOp*/3);
1514 case Hexagon::PS_storerh_pci:
1515 return RealCirc(Hexagon::S2_storerh_pci, /*HasImm*/true, /*MxOp*/3);
1516 case Hexagon::PS_storerf_pci:
1517 return RealCirc(Hexagon::S2_storerf_pci, /*HasImm*/true, /*MxOp*/3);
1518 case Hexagon::PS_storeri_pci:
1519 return RealCirc(Hexagon::S2_storeri_pci, /*HasImm*/true, /*MxOp*/3);
1520 case Hexagon::PS_storerd_pci:
1521 return RealCirc(Hexagon::S2_storerd_pci, /*HasImm*/true, /*MxOp*/3);
1522 case Hexagon::PS_storerb_pcr:
1523 return RealCirc(Hexagon::S2_storerb_pcr, /*HasImm*/false, /*MxOp*/2);
1524 case Hexagon::PS_storerh_pcr:
1525 return RealCirc(Hexagon::S2_storerh_pcr, /*HasImm*/false, /*MxOp*/2);
1526 case Hexagon::PS_storerf_pcr:
1527 return RealCirc(Hexagon::S2_storerf_pcr, /*HasImm*/false, /*MxOp*/2);
1528 case Hexagon::PS_storeri_pcr:
1529 return RealCirc(Hexagon::S2_storeri_pcr, /*HasImm*/false, /*MxOp*/2);
1530 case Hexagon::PS_storerd_pcr:
1531 return RealCirc(Hexagon::S2_storerd_pcr, /*HasImm*/false, /*MxOp*/2);
1532 }
1533
1534 return false;
1535}
1536
1537MachineBasicBlock::instr_iterator
1538HexagonInstrInfo::expandVGatherPseudo(MachineInstr &MI) const {
1539 MachineBasicBlock &MBB = *MI.getParent();
1540 const DebugLoc &DL = MI.getDebugLoc();
1541 unsigned Opc = MI.getOpcode();
1542 MachineBasicBlock::iterator First;
1543
1544 switch (Opc) {
1545 case Hexagon::V6_vgathermh_pseudo:
1546 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
1547 .add(MI.getOperand(2))
1548 .add(MI.getOperand(3))
1549 .add(MI.getOperand(4));
1550 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1551 .add(MI.getOperand(0))
1552 .addImm(MI.getOperand(1).getImm())
1553 .addReg(Hexagon::VTMP);
1554 MBB.erase(MI);
1555 return First.getInstrIterator();
1556
1557 case Hexagon::V6_vgathermw_pseudo:
1558 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
1559 .add(MI.getOperand(2))
1560 .add(MI.getOperand(3))
1561 .add(MI.getOperand(4));
1562 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1563 .add(MI.getOperand(0))
1564 .addImm(MI.getOperand(1).getImm())
1565 .addReg(Hexagon::VTMP);
1566 MBB.erase(MI);
1567 return First.getInstrIterator();
1568
1569 case Hexagon::V6_vgathermhw_pseudo:
1570 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
1571 .add(MI.getOperand(2))
1572 .add(MI.getOperand(3))
1573 .add(MI.getOperand(4));
1574 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1575 .add(MI.getOperand(0))
1576 .addImm(MI.getOperand(1).getImm())
1577 .addReg(Hexagon::VTMP);
1578 MBB.erase(MI);
1579 return First.getInstrIterator();
1580
1581 case Hexagon::V6_vgathermhq_pseudo:
1582 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
1583 .add(MI.getOperand(2))
1584 .add(MI.getOperand(3))
1585 .add(MI.getOperand(4))
1586 .add(MI.getOperand(5));
1587 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1588 .add(MI.getOperand(0))
1589 .addImm(MI.getOperand(1).getImm())
1590 .addReg(Hexagon::VTMP);
1591 MBB.erase(MI);
1592 return First.getInstrIterator();
1593
1594 case Hexagon::V6_vgathermwq_pseudo:
1595 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
1596 .add(MI.getOperand(2))
1597 .add(MI.getOperand(3))
1598 .add(MI.getOperand(4))
1599 .add(MI.getOperand(5));
1600 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1601 .add(MI.getOperand(0))
1602 .addImm(MI.getOperand(1).getImm())
1603 .addReg(Hexagon::VTMP);
1604 MBB.erase(MI);
1605 return First.getInstrIterator();
1606
1607 case Hexagon::V6_vgathermhwq_pseudo:
1608 First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
1609 .add(MI.getOperand(2))
1610 .add(MI.getOperand(3))
1611 .add(MI.getOperand(4))
1612 .add(MI.getOperand(5));
1613 BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1614 .add(MI.getOperand(0))
1615 .addImm(MI.getOperand(1).getImm())
1616 .addReg(Hexagon::VTMP);
1617 MBB.erase(MI);
1618 return First.getInstrIterator();
1619 }
1620
1621 return MI.getIterator();
1622}
1623
1624// We indicate that we want to reverse the branch by
1625// inserting the reversed branching opcode.
1626bool HexagonInstrInfo::reverseBranchCondition(
1627 SmallVectorImpl<MachineOperand> &Cond) const {
1628 if (Cond.empty())
1629 return true;
1630 assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1631 unsigned opcode = Cond[0].getImm();
1632 //unsigned temp;
1633 assert(get(opcode).isBranch() && "Should be a branching condition.");
1634 if (isEndLoopN(opcode))
1635 return true;
1636 unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1637 Cond[0].setImm(NewOpcode);
1638 return false;
1639}
1640
1641void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1642 MachineBasicBlock::iterator MI) const {
1643 DebugLoc DL;
1644 BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1645}
1646
1647bool HexagonInstrInfo::isPostIncrement(const MachineInstr &MI) const {
1648 return getAddrMode(MI) == HexagonII::PostInc;
1649}
1650
1651// Returns true if an instruction is predicated irrespective of the predicate
1652// sense. For example, all of the following will return true.
1653// if (p0) R1 = add(R2, R3)
1654// if (!p0) R1 = add(R2, R3)
1655// if (p0.new) R1 = add(R2, R3)
1656// if (!p0.new) R1 = add(R2, R3)
1657// Note: New-value stores are not included here as in the current
1658// implementation, we don't need to check their predicate sense.
1659bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
1660 const uint64_t F = MI.getDesc().TSFlags;
1661 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
1662}
1663
1664bool HexagonInstrInfo::PredicateInstruction(
1665 MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1666 if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1667 isEndLoopN(Cond[0].getImm())) {
1668 LLVM_DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1669 return false;
1670 }
1671 int Opc = MI.getOpcode();
1672 assert(isPredicable(MI) && "Expected predicable instruction");
1673 bool invertJump = predOpcodeHasNot(Cond);
1674
1675 // We have to predicate MI "in place", i.e. after this function returns,
1676 // MI will need to be transformed into a predicated form. To avoid com-
1677 // plicated manipulations with the operands (handling tied operands,
1678 // etc.), build a new temporary instruction, then overwrite MI with it.
1679
1680 MachineBasicBlock &B = *MI.getParent();
1681 DebugLoc DL = MI.getDebugLoc();
1682 unsigned PredOpc = getCondOpcode(Opc, invertJump);
1683 MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1684 unsigned NOp = 0, NumOps = MI.getNumOperands();
1685 while (NOp < NumOps) {
1686 MachineOperand &Op = MI.getOperand(NOp);
1687 if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1688 break;
1689 T.add(Op);
1690 NOp++;
1691 }
1692
1693 unsigned PredReg, PredRegPos, PredRegFlags;
1694 bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1695 (void)GotPredReg;
1696 assert(GotPredReg);
1697 T.addReg(PredReg, PredRegFlags);
1698 while (NOp < NumOps)
1699 T.add(MI.getOperand(NOp++));
1700
1701 MI.setDesc(get(PredOpc));
1702 while (unsigned n = MI.getNumOperands())
1703 MI.removeOperand(n-1);
1704 for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1705 MI.addOperand(T->getOperand(i));
1706
1707 MachineBasicBlock::instr_iterator TI = T->getIterator();
1708 B.erase(TI);
1709
1710 MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1711 MRI.clearKillFlags(PredReg);
1712 return true;
1713}
1714
1715bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1716 ArrayRef<MachineOperand> Pred2) const {
1717 // TODO: Fix this
1718 return false;
1719}
1720
1721bool HexagonInstrInfo::ClobbersPredicate(MachineInstr &MI,
1722 std::vector<MachineOperand> &Pred,
1723 bool SkipDead) const {
1724 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1725
1726 for (const MachineOperand &MO : MI.operands()) {
1727 if (MO.isReg()) {
1728 if (!MO.isDef())
1729 continue;
1730 const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1731 if (RC == &Hexagon::PredRegsRegClass) {
1732 Pred.push_back(MO);
1733 return true;
1734 }
1735 continue;
1736 } else if (MO.isRegMask()) {
1737 for (unsigned PR : Hexagon::PredRegsRegClass) {
1738 if (!MI.modifiesRegister(PR, &HRI))
1739 continue;
1740 Pred.push_back(MO);
1741 return true;
1742 }
1743 }
1744 }
1745 return false;
1746}
1747
1748bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
1749 if (!MI.getDesc().isPredicable())
1750 return false;
1751
1752 if (MI.isCall() || isTailCall(MI)) {
1753 if (!Subtarget.usePredicatedCalls())
1754 return false;
1755 }
1756
1757 // HVX loads are not predicable on v60, but are on v62.
1758 if (!Subtarget.hasV62Ops()) {
1759 switch (MI.getOpcode()) {
1760 case Hexagon::V6_vL32b_ai:
1761 case Hexagon::V6_vL32b_pi:
1762 case Hexagon::V6_vL32b_ppu:
1763 case Hexagon::V6_vL32b_cur_ai:
1764 case Hexagon::V6_vL32b_cur_pi:
1765 case Hexagon::V6_vL32b_cur_ppu:
1766 case Hexagon::V6_vL32b_nt_ai:
1767 case Hexagon::V6_vL32b_nt_pi:
1768 case Hexagon::V6_vL32b_nt_ppu:
1769 case Hexagon::V6_vL32b_tmp_ai:
1770 case Hexagon::V6_vL32b_tmp_pi:
1771 case Hexagon::V6_vL32b_tmp_ppu:
1772 case Hexagon::V6_vL32b_nt_cur_ai:
1773 case Hexagon::V6_vL32b_nt_cur_pi:
1774 case Hexagon::V6_vL32b_nt_cur_ppu:
1775 case Hexagon::V6_vL32b_nt_tmp_ai:
1776 case Hexagon::V6_vL32b_nt_tmp_pi:
1777 case Hexagon::V6_vL32b_nt_tmp_ppu:
1778 return false;
1779 }
1780 }
1781 return true;
1782}
1783
1784bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1785 const MachineBasicBlock *MBB,
1786 const MachineFunction &MF) const {
1787 // Debug info is never a scheduling boundary. It's necessary to be explicit
1788 // due to the special treatment of IT instructions below, otherwise a
1789 // dbg_value followed by an IT will result in the IT instruction being
1790 // considered a scheduling hazard, which is wrong. It should be the actual
1791 // instruction preceding the dbg_value instruction(s), just like it is
1792 // when debug info is not present.
1793 if (MI.isDebugInstr())
1794 return false;
1795
1796 // Throwing call is a boundary.
1797 if (MI.isCall()) {
1798 // Don't mess around with no return calls.
1799 if (doesNotReturn(MI))
1800 return true;
1801 // If any of the block's successors is a landing pad, this could be a
1802 // throwing call.
1803 for (auto *I : MBB->successors())
1804 if (I->isEHPad())
1805 return true;
1806 }
1807
1808 // Terminators and labels can't be scheduled around.
1809 if (MI.getDesc().isTerminator() || MI.isPosition())
1810 return true;
1811
1812 // INLINEASM_BR can jump to another block
1813 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1814 return true;
1815
1816 if (MI.isInlineAsm() && !ScheduleInlineAsm)
1817 return true;
1818
1819 return false;
1820}
1821
1822/// Measure the specified inline asm to determine an approximation of its
1823/// length.
1824/// Comments (which run till the next SeparatorString or newline) do not
1825/// count as an instruction.
1826/// Any other non-whitespace text is considered an instruction, with
1827/// multiple instructions separated by SeparatorString or newlines.
1828/// Variable-length instructions are not handled here; this function
1829/// may be overloaded in the target code to do that.
1830 /// Hexagon counts the number of ##'s and adjusts for that many
1831 /// constant extenders.
1832unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1833 const MCAsmInfo &MAI,
1834 const TargetSubtargetInfo *STI) const {
1835 StringRef AStr(Str);
1836 // Count the number of instructions in the asm.
1837 bool atInsnStart = true;
1838 unsigned Length = 0;
1839 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
1840 for (; *Str; ++Str) {
1841 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1842 strlen(MAI.getSeparatorString())) == 0)
1843 atInsnStart = true;
1844 if (atInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
1845 Length += MaxInstLength;
1846 atInsnStart = false;
1847 }
1848 if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1849 MAI.getCommentString().size()) == 0)
1850 atInsnStart = false;
1851 }
1852
1853 // Add to size number of constant extenders seen * 4.
1854 StringRef Occ("##");
1855 Length += AStr.count(Occ)*4;
1856 return Length;
1857}
1858
1859ScheduleHazardRecognizer*
1860HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1861 const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1862 if (UseDFAHazardRec)
1863 return new HexagonHazardRecognizer(II, this, Subtarget);
1864 return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1865}
1866
1867/// For a comparison instruction, return the source registers in
1868 /// \p SrcReg and \p SrcReg2 if it has two register operands, and the value it
1869/// compares against in CmpValue. Return true if the comparison instruction
1870/// can be analyzed.
1871bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1872 Register &SrcReg2, int64_t &Mask,
1873 int64_t &Value) const {
1874 unsigned Opc = MI.getOpcode();
1875
1876 // Set mask and the first source register.
1877 switch (Opc) {
1878 case Hexagon::C2_cmpeq:
1879 case Hexagon::C2_cmpeqp:
1880 case Hexagon::C2_cmpgt:
1881 case Hexagon::C2_cmpgtp:
1882 case Hexagon::C2_cmpgtu:
1883 case Hexagon::C2_cmpgtup:
1884 case Hexagon::C4_cmpneq:
1885 case Hexagon::C4_cmplte:
1886 case Hexagon::C4_cmplteu:
1887 case Hexagon::C2_cmpeqi:
1888 case Hexagon::C2_cmpgti:
1889 case Hexagon::C2_cmpgtui:
1890 case Hexagon::C4_cmpneqi:
1891 case Hexagon::C4_cmplteui:
1892 case Hexagon::C4_cmpltei:
1893 SrcReg = MI.getOperand(1).getReg();
1894 Mask = ~0;
1895 break;
1896 case Hexagon::A4_cmpbeq:
1897 case Hexagon::A4_cmpbgt:
1898 case Hexagon::A4_cmpbgtu:
1899 case Hexagon::A4_cmpbeqi:
1900 case Hexagon::A4_cmpbgti:
1901 case Hexagon::A4_cmpbgtui:
1902 SrcReg = MI.getOperand(1).getReg();
1903 Mask = 0xFF;
1904 break;
1905 case Hexagon::A4_cmpheq:
1906 case Hexagon::A4_cmphgt:
1907 case Hexagon::A4_cmphgtu:
1908 case Hexagon::A4_cmpheqi:
1909 case Hexagon::A4_cmphgti:
1910 case Hexagon::A4_cmphgtui:
1911 SrcReg = MI.getOperand(1).getReg();
1912 Mask = 0xFFFF;
1913 break;
1914 }
1915
1916 // Set the value/second source register.
1917 switch (Opc) {
1918 case Hexagon::C2_cmpeq:
1919 case Hexagon::C2_cmpeqp:
1920 case Hexagon::C2_cmpgt:
1921 case Hexagon::C2_cmpgtp:
1922 case Hexagon::C2_cmpgtu:
1923 case Hexagon::C2_cmpgtup:
1924 case Hexagon::A4_cmpbeq:
1925 case Hexagon::A4_cmpbgt:
1926 case Hexagon::A4_cmpbgtu:
1927 case Hexagon::A4_cmpheq:
1928 case Hexagon::A4_cmphgt:
1929 case Hexagon::A4_cmphgtu:
1930 case Hexagon::C4_cmpneq:
1931 case Hexagon::C4_cmplte:
1932 case Hexagon::C4_cmplteu:
1933 SrcReg2 = MI.getOperand(2).getReg();
1934 Value = 0;
1935 return true;
1936
1937 case Hexagon::C2_cmpeqi:
1938 case Hexagon::C2_cmpgtui:
1939 case Hexagon::C2_cmpgti:
1940 case Hexagon::C4_cmpneqi:
1941 case Hexagon::C4_cmplteui:
1942 case Hexagon::C4_cmpltei:
1943 case Hexagon::A4_cmpbeqi:
1944 case Hexagon::A4_cmpbgti:
1945 case Hexagon::A4_cmpbgtui:
1946 case Hexagon::A4_cmpheqi:
1947 case Hexagon::A4_cmphgti:
1948 case Hexagon::A4_cmphgtui: {
1949 SrcReg2 = 0;
1950 const MachineOperand &Op2 = MI.getOperand(2);
1951 if (!Op2.isImm())
1952 return false;
1953 Value = MI.getOperand(2).getImm();
1954 return true;
1955 }
1956 }
1957
1958 return false;
1959}
1960
1961unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1962 const MachineInstr &MI,
1963 unsigned *PredCost) const {
1964 return getInstrTimingClassLatency(ItinData, MI);
1965}
1966
1967DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1968 const TargetSubtargetInfo &STI) const {
1969 const InstrItineraryData *II = STI.getInstrItineraryData();
1970 return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1971}
1972
1973// Inspired by this pair:
1974// %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
1975// S2_storeri_io %r29, 132, killed %r1; flags: mem:ST4[FixedStack1]
1976// Currently AA considers the addresses in these instructions to be aliasing.
1977bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
1978 const MachineInstr &MIa, const MachineInstr &MIb) const {
1979 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1980 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1981 return false;
1982
1983 // Instructions that are pure loads (not load-and-store memops) are not
1984 // dependent.
1985 if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1986 return true;
1987
1988 // Get the base register in MIa.
1989 unsigned BasePosA, OffsetPosA;
1990 if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA))
1991 return false;
1992 const MachineOperand &BaseA = MIa.getOperand(BasePosA);
1993 Register BaseRegA = BaseA.getReg();
1994 unsigned BaseSubA = BaseA.getSubReg();
1995
1996 // Get the base register in MIb.
1997 unsigned BasePosB, OffsetPosB;
1998 if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB))
1999 return false;
2000 const MachineOperand &BaseB = MIb.getOperand(BasePosB);
2001 Register BaseRegB = BaseB.getReg();
2002 unsigned BaseSubB = BaseB.getSubReg();
2003
2004 if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
2005 return false;
2006
2007 // Get the access sizes.
2008 unsigned SizeA = getMemAccessSize(MIa);
2009 unsigned SizeB = getMemAccessSize(MIb);
2010
2011 // Get the offsets. Handle immediates only for now.
2012 const MachineOperand &OffA = MIa.getOperand(OffsetPosA);
2013 const MachineOperand &OffB = MIb.getOperand(OffsetPosB);
2014 if (!MIa.getOperand(OffsetPosA).isImm() ||
2015 !MIb.getOperand(OffsetPosB).isImm())
2016 return false;
2017 int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();
2018 int OffsetB = isPostIncrement(MIb) ? 0 : OffB.getImm();
2019
2020 // This is a mem access with the same base register and known offsets from it.
2021 // Reason about it.
2022 if (OffsetA > OffsetB) {
2023 uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
2024 return SizeB <= OffDiff;
2025 }
2026 if (OffsetA < OffsetB) {
2027 uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
2028 return SizeA <= OffDiff;
2029 }
2030
2031 return false;
2032}
2033
2034/// If the instruction is an increment of a constant value, return the amount.
2035bool HexagonInstrInfo::getIncrementValue(const MachineInstr &MI,
2036 int &Value) const {
2037 if (isPostIncrement(MI)) {
2038 unsigned BasePos = 0, OffsetPos = 0;
2039 if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
2040 return false;
2041 const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
2042 if (OffsetOp.isImm()) {
2043 Value = OffsetOp.getImm();
2044 return true;
2045 }
2046 } else if (MI.getOpcode() == Hexagon::A2_addi) {
2047 const MachineOperand &AddOp = MI.getOperand(2);
2048 if (AddOp.isImm()) {
2049 Value = AddOp.getImm();
2050 return true;
2051 }
2052 }
2053
2054 return false;
2055}
2056
2057std::pair<unsigned, unsigned>
2058HexagonInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
2059 return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
2060 TF & HexagonII::MO_Bitmasks);
2061}
2062
2063ArrayRef<std::pair<unsigned, const char*>>
2064HexagonInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
2065 using namespace HexagonII;
2066
2067 static const std::pair<unsigned, const char*> Flags[] = {
2068 {MO_PCREL, "hexagon-pcrel"},
2069 {MO_GOT, "hexagon-got"},
2070 {MO_LO16, "hexagon-lo16"},
2071 {MO_HI16, "hexagon-hi16"},
2072 {MO_GPREL, "hexagon-gprel"},
2073 {MO_GDGOT, "hexagon-gdgot"},
2074 {MO_GDPLT, "hexagon-gdplt"},
2075 {MO_IE, "hexagon-ie"},
2076 {MO_IEGOT, "hexagon-iegot"},
2077 {MO_TPREL, "hexagon-tprel"}
2078 };
2079 return makeArrayRef(Flags);
2080}
2081
2082ArrayRef<std::pair<unsigned, const char*>>
2083HexagonInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
2084 using namespace HexagonII;
2085
2086 static const std::pair<unsigned, const char*> Flags[] = {
2087 {HMOTF_ConstExtended, "hexagon-ext"}
2088 };
2089 return makeArrayRef(Flags);
2090}
2091
2092unsigned HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const {
2093 MachineRegisterInfo &MRI = MF->getRegInfo();
2094 const TargetRegisterClass *TRC;
2095 if (VT == MVT::i1) {
2096 TRC = &Hexagon::PredRegsRegClass;
2097 } else if (VT == MVT::i32 || VT == MVT::f32) {
2098 TRC = &Hexagon::IntRegsRegClass;
2099 } else if (VT == MVT::i64 || VT == MVT::f64) {
2100 TRC = &Hexagon::DoubleRegsRegClass;
2101 } else {
2102 llvm_unreachable("Cannot handle this register class");
2103 }
2104
2105 Register NewReg = MRI.createVirtualRegister(TRC);
2106 return NewReg;
2107}
2108
2109bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr &MI) const {
2110 return (getAddrMode(MI) == HexagonII::AbsoluteSet);
2111}
2112
2113bool HexagonInstrInfo::isAccumulator(const MachineInstr &MI) const {
2114 const uint64_t F = MI.getDesc().TSFlags;
2115 return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask);
2116}
2117
2118bool HexagonInstrInfo::isBaseImmOffset(const MachineInstr &MI) const {
2119 return getAddrMode(MI) == HexagonII::BaseImmOffset;
2120}
2121
2122bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
2123 return !isTC1(MI) && !isTC2Early(MI) && !MI.getDesc().mayLoad() &&
2124 !MI.getDesc().mayStore() &&
2125 MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
2126 MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
2127 !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
2128}
2129
2130// Return true if the instruction is a compound branch instruction.
2131bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
2132 return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
2133}
2134
2135// TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
2136// isFPImm and later getFPImm as well.
2137bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
2138 const uint64_t F = MI.getDesc().TSFlags;
2139 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
2140 if (isExtended) // Instruction must be extended.
2141 return true;
2142
2143 unsigned isExtendable =
2144 (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
2145 if (!isExtendable)
2146 return false;
2147
2148 if (MI.isCall())
2149 return false;
2150
2151 short ExtOpNum = getCExtOpNum(MI);
2152 const MachineOperand &MO = MI.getOperand(ExtOpNum);
2153 // Use MO operand flags to determine if MO
2154 // has the HMOTF_ConstExtended flag set.
2155 if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2156 return true;
2157 // If this is a Machine BB address we are talking about, and it is
2158 // not marked as extended, say so.
2159 if (MO.isMBB())
2160 return false;
2161
2162 // We could be using an instruction with an extendable immediate and shoehorn
2163 // a global address into it. If it is a global address it will be constant
2164 // extended. We do this for COMBINE.
2165 if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
2166 MO.isJTI() || MO.isCPI() || MO.isFPImm())
2167 return true;
2168
2169 // If the extendable operand is not 'Immediate' type, the instruction should
2170 // have 'isExtended' flag set.
2171 assert(MO.isImm() && "Extendable operand must be Immediate type");
2172
2173 int MinValue = getMinValue(MI);
2174 int MaxValue = getMaxValue(MI);
2175 int ImmValue = MO.getImm();
2176
2177 return (ImmValue < MinValue || ImmValue > MaxValue);
2178}
2179
2180bool HexagonInstrInfo::isDeallocRet(const MachineInstr &MI) const {
2181 switch (MI.getOpcode()) {
2182 case Hexagon::L4_return:
2183 case Hexagon::L4_return_t:
2184 case Hexagon::L4_return_f:
2185 case Hexagon::L4_return_tnew_pnt:
2186 case Hexagon::L4_return_fnew_pnt:
2187 case Hexagon::L4_return_tnew_pt:
2188 case Hexagon::L4_return_fnew_pt:
2189 return true;
2190 }
2191 return false;
2192}
2193
2194// Return true when ConsMI uses a register defined by ProdMI.
2195bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI,
2196 const MachineInstr &ConsMI) const {
2197 if (!ProdMI.getDesc().getNumDefs())
2198 return false;
2199 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
2200
2201 SmallVector<unsigned, 4> DefsA;
2202 SmallVector<unsigned, 4> DefsB;
2203 SmallVector<unsigned, 8> UsesA;
2204 SmallVector<unsigned, 8> UsesB;
2205
2206 parseOperands(ProdMI, DefsA, UsesA);
2207 parseOperands(ConsMI, DefsB, UsesB);
2208
2209 for (auto &RegA : DefsA)
2210 for (auto &RegB : UsesB) {
2211 // True data dependency.
2212 if (RegA == RegB)
2213 return true;
2214
2215 if (Register::isPhysicalRegister(RegA))
2216 for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
2217 if (RegB == *SubRegs)
2218 return true;
2219
2220 if (Register::isPhysicalRegister(RegB))
2221 for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
2222 if (RegA == *SubRegs)
2223 return true;
2224 }
2225
2226 return false;
2227}
2228
2229 // Returns true if the instruction is already a .cur.
2230bool HexagonInstrInfo::isDotCurInst(const MachineInstr &MI) const {
2231 switch (MI.getOpcode()) {
2232 case Hexagon::V6_vL32b_cur_pi:
2233 case Hexagon::V6_vL32b_cur_ai:
2234 return true;
2235 }
2236 return false;
2237}
2238
2239// Returns true, if any one of the operands is a dot new
2240// insn, whether it is predicated dot new or register dot new.
2241bool HexagonInstrInfo::isDotNewInst(const MachineInstr &MI) const {
2242 if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
2243 return true;
2244
2245 return false;
2246}
2247
2248/// Symmetrical. See if these two instructions are fit for duplex pair.
2249bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
2250 const MachineInstr &MIb) const {
2251 HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
2252 HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
2253 return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
2254}
2255
2256bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
2257 return (Opcode == Hexagon::ENDLOOP0 ||
2258 Opcode == Hexagon::ENDLOOP1);
2259}
2260
2261bool HexagonInstrInfo::isExpr(unsigned OpType) const {
2262 switch(OpType) {
2263 case MachineOperand::MO_MachineBasicBlock:
2264 case MachineOperand::MO_GlobalAddress:
2265 case MachineOperand::MO_ExternalSymbol:
2266 case MachineOperand::MO_JumpTableIndex:
2267 case MachineOperand::MO_ConstantPoolIndex:
2268 case MachineOperand::MO_BlockAddress:
2269 return true;
2270 default:
2271 return false;
2272 }
2273}
2274
2275bool HexagonInstrInfo::isExtendable(const MachineInstr &MI) const {
2276 const MCInstrDesc &MID = MI.getDesc();
2277 const uint64_t F = MID.TSFlags;
2278 if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
2279 return true;
2280
2281 // TODO: This is largely obsolete now. Will need to be removed
2282 // in consecutive patches.
2283 switch (MI.getOpcode()) {
2284 // PS_fi and PS_fia remain special cases.
2285 case Hexagon::PS_fi:
2286 case Hexagon::PS_fia:
2287 return true;
2288 default:
2289 return false;
2290 }
2291 return false;
2292}
2293
2294// This returns true in two cases:
2295// - The OP code itself indicates that this is an extended instruction.
2296// - One of MOs has been marked with HMOTF_ConstExtended flag.
2297bool HexagonInstrInfo::isExtended(const MachineInstr &MI) const {
2298 // First check if this is permanently extended op code.
2299 const uint64_t F = MI.getDesc().TSFlags;
2300 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
2301 return true;
2302 // Use MO operand flags to determine if one of MI's operands
2303 // has HMOTF_ConstExtended flag set.
2304 for (const MachineOperand &MO : MI.operands())
2305 if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2306 return true;
2307 return false;
2308}
2309
2310bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
2311 unsigned Opcode = MI.getOpcode();
2312 const uint64_t F = get(Opcode).TSFlags;
2313 return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2314}
2315
2316// No V60 HVX VMEM with A_INDIRECT.
2317bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
2318 const MachineInstr &J) const {
2319 if (!isHVXVec(I))
2320 return false;
2321 if (!I.mayLoad() && !I.mayStore())
2322 return false;
2323 return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2324}
2325
2326bool HexagonInstrInfo::isIndirectCall(const MachineInstr &MI) const {
2327 switch (MI.getOpcode()) {
2328 case Hexagon::J2_callr:
2329 case Hexagon::J2_callrf:
2330 case Hexagon::J2_callrt:
2331 case Hexagon::PS_call_nr:
2332 return true;
2333 }
2334 return false;
2335}
2336
2337bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr &MI) const {
2338 switch (MI.getOpcode()) {
2339 case Hexagon::L4_return:
2340 case Hexagon::L4_return_t:
2341 case Hexagon::L4_return_f:
2342 case Hexagon::L4_return_fnew_pnt:
2343 case Hexagon::L4_return_fnew_pt:
2344 case Hexagon::L4_return_tnew_pnt:
2345 case Hexagon::L4_return_tnew_pt:
2346 return true;
2347 }
2348 return false;
2349}
2350
2351bool HexagonInstrInfo::isJumpR(const MachineInstr &MI) const {
2352 switch (MI.getOpcode()) {
2353 case Hexagon::J2_jumpr:
2354 case Hexagon::J2_jumprt:
2355 case Hexagon::J2_jumprf:
2356 case Hexagon::J2_jumprtnewpt:
2357 case Hexagon::J2_jumprfnewpt:
2358 case Hexagon::J2_jumprtnew:
2359 case Hexagon::J2_jumprfnew:
2360 return true;
2361 }
2362 return false;
2363}
2364
2365 // Return true if a given MI can accommodate the given offset.
2366 // Use an absolute estimate as opposed to the exact number.
2367// TODO: This will need to be changed to use MC level
2368// definition of instruction extendable field size.
2369bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr &MI,
2370 unsigned offset) const {
2371 // This selection of jump instructions matches what
2372 // analyzeBranch can parse, plus NVJ.
2373 if (isNewValueJump(MI)) // r9:2
2374 return isInt<11>(offset);
2375
2376 switch (MI.getOpcode()) {
2377 // Still missing Jump to address condition on register value.
2378 default:
2379 return false;
2380 case Hexagon::J2_jump: // bits<24> dst; // r22:2
2381 case Hexagon::J2_call:
2382 case Hexagon::PS_call_nr:
2383 return isInt<24>(offset);
2384 case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2385 case Hexagon::J2_jumpf:
2386 case Hexagon::J2_jumptnew:
2387 case Hexagon::J2_jumptnewpt:
2388 case Hexagon::J2_jumpfnew:
2389 case Hexagon::J2_jumpfnewpt:
2390 case Hexagon::J2_callt:
2391 case Hexagon::J2_callf:
2392 return isInt<17>(offset);
2393 case Hexagon::J2_loop0i:
2394 case Hexagon::J2_loop0iext:
2395 case Hexagon::J2_loop0r:
2396 case Hexagon::J2_loop0rext:
2397 case Hexagon::J2_loop1i:
2398 case Hexagon::J2_loop1iext:
2399 case Hexagon::J2_loop1r:
2400 case Hexagon::J2_loop1rext:
2401 return isInt<9>(offset);
2402 // TODO: Add all the compound branches here. Can we do this in Relation model?
2403 case Hexagon::J4_cmpeqi_tp0_jump_nt:
2404 case Hexagon::J4_cmpeqi_tp1_jump_nt:
2405 case Hexagon::J4_cmpeqn1_tp0_jump_nt:
2406 case Hexagon::J4_cmpeqn1_tp1_jump_nt:
2407 return isInt<11>(offset);
2408 }
2409}
2410
2411bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
2412 // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
2413 // resource, but all operands can be received late like an ALU instruction.
2414 return getType(MI) == HexagonII::TypeCVI_VX_LATE;
2415}
2416
2417bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
2418 unsigned Opcode = MI.getOpcode();
2419 return Opcode == Hexagon::J2_loop0i ||
2420 Opcode == Hexagon::J2_loop0r ||
2421 Opcode == Hexagon::J2_loop0iext ||
2422 Opcode == Hexagon::J2_loop0rext ||
2423 Opcode == Hexagon::J2_loop1i ||
2424 Opcode == Hexagon::J2_loop1r ||
2425 Opcode == Hexagon::J2_loop1iext ||
2426 Opcode == Hexagon::J2_loop1rext;
2427}
2428
2429bool HexagonInstrInfo::isMemOp(const MachineInstr &MI) const {
2430 switch (MI.getOpcode()) {
2431 default: return false;
2432 case Hexagon::L4_iadd_memopw_io:
2433 case Hexagon::L4_isub_memopw_io:
2434 case Hexagon::L4_add_memopw_io:
2435 case Hexagon::L4_sub_memopw_io:
2436 case Hexagon::L4_and_memopw_io:
2437 case Hexagon::L4_or_memopw_io:
2438 case Hexagon::L4_iadd_memoph_io:
2439 case Hexagon::L4_isub_memoph_io:
2440 case Hexagon::L4_add_memoph_io:
2441 case Hexagon::L4_sub_memoph_io:
2442 case Hexagon::L4_and_memoph_io:
2443 case Hexagon::L4_or_memoph_io:
2444 case Hexagon::L4_iadd_memopb_io:
2445 case Hexagon::L4_isub_memopb_io:
2446 case Hexagon::L4_add_memopb_io:
2447 case Hexagon::L4_sub_memopb_io:
2448 case Hexagon::L4_and_memopb_io:
2449 case Hexagon::L4_or_memopb_io:
2450 case Hexagon::L4_ior_memopb_io:
2451 case Hexagon::L4_ior_memoph_io:
2452 case Hexagon::L4_ior_memopw_io:
2453 case Hexagon::L4_iand_memopb_io:
2454 case Hexagon::L4_iand_memoph_io:
2455 case Hexagon::L4_iand_memopw_io:
2456 return true;
2457 }
2458 return false;
2459}
2460
2461bool HexagonInstrInfo::isNewValue(const MachineInstr &MI) const {
2462 const uint64_t F = MI.getDesc().TSFlags;
2463 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2464}
2465
2466bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2467 const uint64_t F = get(Opcode).TSFlags;
2468 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2469}
2470
2471bool HexagonInstrInfo::isNewValueInst(const MachineInstr &MI) const {
2472 return isNewValueJump(MI) || isNewValueStore(MI);
2473}
2474
2475bool HexagonInstrInfo::isNewValueJump(const MachineInstr &MI) const {
2476 return isNewValue(MI) && MI.isBranch();
2477}
2478
2479bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2480 return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2481}
2482
2483bool HexagonInstrInfo::isNewValueStore(const MachineInstr &MI) const {
2484 const uint64_t F = MI.getDesc().TSFlags;
2485 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2486}
2487
2488bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2489 const uint64_t F = get(Opcode).TSFlags;
2490 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2491}
2492
2493// Returns true if a particular operand is extendable for an instruction.
2494bool HexagonInstrInfo::isOperandExtended(const MachineInstr &MI,
2495 unsigned OperandNum) const {
2496 const uint64_t F = MI.getDesc().TSFlags;
2497 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2498 == OperandNum;
2499}
2500
2501bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
2502 const uint64_t F = MI.getDesc().TSFlags;
2503 assert(isPredicated(MI));
2504 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2505}
2506
2507bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2508 const uint64_t F = get(Opcode).TSFlags;
2509 assert(isPredicated(Opcode));
2510 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2511}
2512
2513bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
2514 const uint64_t F = MI.getDesc().TSFlags;
2515 return !((F >> HexagonII::PredicatedFalsePos) &
2516 HexagonII::PredicatedFalseMask);
2517}
2518
2519bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2520 const uint64_t F = get(Opcode).TSFlags;
2521 // Make sure that the instruction is predicated.
2522 assert((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
2523 return !((F >> HexagonII::PredicatedFalsePos) &
2524 HexagonII::PredicatedFalseMask);
2525}
2526
2527bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2528 const uint64_t F = get(Opcode).TSFlags;
2529 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
2530}
2531
2532bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2533 const uint64_t F = get(Opcode).TSFlags;
2534 return (F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
2535}
2536
2537bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2538 const uint64_t F = get(Opcode).TSFlags;
2539 assert(get(Opcode).isBranch() &&
2540 (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2541 return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2542}
2543
2544bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr &MI) const {
2545 return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2546 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2547 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2548 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2549}
2550
2551bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
2552 switch (MI.getOpcode()) {
2553 // Byte
2554 case Hexagon::L2_loadrb_io:
2555 case Hexagon::L4_loadrb_ur:
2556 case Hexagon::L4_loadrb_ap:
2557 case Hexagon::L2_loadrb_pr:
2558 case Hexagon::L2_loadrb_pbr:
2559 case Hexagon::L2_loadrb_pi:
2560 case Hexagon::L2_loadrb_pci:
2561 case Hexagon::L2_loadrb_pcr:
2562 case Hexagon::L2_loadbsw2_io:
2563 case Hexagon::L4_loadbsw2_ur:
2564 case Hexagon::L4_loadbsw2_ap:
2565 case Hexagon::L2_loadbsw2_pr:
2566 case Hexagon::L2_loadbsw2_pbr:
2567 case Hexagon::L2_loadbsw2_pi:
2568 case Hexagon::L2_loadbsw2_pci:
2569 case Hexagon::L2_loadbsw2_pcr:
2570 case Hexagon::L2_loadbsw4_io:
2571 case Hexagon::L4_loadbsw4_ur:
2572 case Hexagon::L4_loadbsw4_ap:
2573 case Hexagon::L2_loadbsw4_pr:
2574 case Hexagon::L2_loadbsw4_pbr:
2575 case Hexagon::L2_loadbsw4_pi:
2576 case Hexagon::L2_loadbsw4_pci:
2577 case Hexagon::L2_loadbsw4_pcr:
2578 case Hexagon::L4_loadrb_rr:
2579 case Hexagon::L2_ploadrbt_io:
2580 case Hexagon::L2_ploadrbt_pi:
2581 case Hexagon::L2_ploadrbf_io:
2582 case Hexagon::L2_ploadrbf_pi:
2583 case Hexagon::L2_ploadrbtnew_io:
2584 case Hexagon::L2_ploadrbfnew_io:
2585 case Hexagon::L4_ploadrbt_rr:
2586 case Hexagon::L4_ploadrbf_rr:
2587 case Hexagon::L4_ploadrbtnew_rr:
2588 case Hexagon::L4_ploadrbfnew_rr:
2589 case Hexagon::L2_ploadrbtnew_pi:
2590 case Hexagon::L2_ploadrbfnew_pi:
2591 case Hexagon::L4_ploadrbt_abs:
2592 case Hexagon::L4_ploadrbf_abs:
2593 case Hexagon::L4_ploadrbtnew_abs:
2594 case Hexagon::L4_ploadrbfnew_abs:
2595 case Hexagon::L2_loadrbgp:
2596 // Half
2597 case Hexagon::L2_loadrh_io:
2598 case Hexagon::L4_loadrh_ur:
2599 case Hexagon::L4_loadrh_ap:
2600 case Hexagon::L2_loadrh_pr:
2601 case Hexagon::L2_loadrh_pbr:
2602 case Hexagon::L2_loadrh_pi:
2603 case Hexagon::L2_loadrh_pci:
2604 case Hexagon::L2_loadrh_pcr:
2605 case Hexagon::L4_loadrh_rr:
2606 case Hexagon::L2_ploadrht_io:
2607 case Hexagon::L2_ploadrht_pi:
2608 case Hexagon::L2_ploadrhf_io:
2609 case Hexagon::L2_ploadrhf_pi:
2610 case Hexagon::L2_ploadrhtnew_io:
2611 case Hexagon::L2_ploadrhfnew_io:
2612 case Hexagon::L4_ploadrht_rr:
2613 case Hexagon::L4_ploadrhf_rr:
2614 case Hexagon::L4_ploadrhtnew_rr:
2615 case Hexagon::L4_ploadrhfnew_rr:
2616 case Hexagon::L2_ploadrhtnew_pi:
2617 case Hexagon::L2_ploadrhfnew_pi:
2618 case Hexagon::L4_ploadrht_abs:
2619 case Hexagon::L4_ploadrhf_abs:
2620 case Hexagon::L4_ploadrhtnew_abs:
2621 case Hexagon::L4_ploadrhfnew_abs:
2622 case Hexagon::L2_loadrhgp:
2623 return true;
2624 default:
2625 return false;
2626 }
2627}
2628
2629bool HexagonInstrInfo::isSolo(const MachineInstr &MI) const {
2630 const uint64_t F = MI.getDesc().TSFlags;
2631 return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2632}
2633
2634bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr &MI) const {
2635 switch (MI.getOpcode()) {
2636 case Hexagon::STriw_pred:
2637 case Hexagon::LDriw_pred:
2638 return true;
2639 default:
2640 return false;
2641 }
2642}
2643
2644bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
2645 if (!MI.isBranch())
2646 return false;
2647
2648 for (auto &Op : MI.operands())
2649 if (Op.isGlobal() || Op.isSymbol())
2650 return true;
2651 return false;
2652}
2653
2654// Returns true when SU has a timing class TC1.
2655bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
2656 unsigned SchedClass = MI.getDesc().getSchedClass();
2657 return is_TC1(SchedClass);
2658}
2659
2660bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
2661 unsigned SchedClass = MI.getDesc().getSchedClass();
2662 return is_TC2(SchedClass);
2663}
2664
2665bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
2666 unsigned SchedClass = MI.getDesc().getSchedClass();
2667 return is_TC2early(SchedClass);
2668}
2669
2670bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
2671 unsigned SchedClass = MI.getDesc().getSchedClass();
2672 return is_TC4x(SchedClass);
2673}
2674
2675// Schedule this ASAP.
2676bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
2677 const MachineInstr &MI2) const {
2678 if (mayBeCurLoad(MI1)) {
2679 // if (result of SU is used in Next) return true;
2680 Register DstReg = MI1.getOperand(0).getReg();
2681 int N = MI2.getNumOperands();
2682 for (int I = 0; I < N; I++)
2683 if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2684 return true;
2685 }
2686 if (mayBeNewStore(MI2))
2687 if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2688 if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2689 MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2690 return true;
2691 return false;
2692}
2693
2694bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
2695 const uint64_t V = getType(MI);
2696 return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
2697}
2698
2699// Check if the Offset is a valid auto-inc imm by Load/Store Type.
2700bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, int Offset) const {
2701 int Size = VT.getSizeInBits() / 8;
2702 if (Offset % Size != 0)
2703 return false;
2704 int Count = Offset / Size;
2705
2706 switch (VT.getSimpleVT().SimpleTy) {
2707 // For scalars the auto-inc is s4
2708 case MVT::i8:
2709 case MVT::i16:
2710 case MVT::i32:
2711 case MVT::i64:
2712 case MVT::f32:
2713 case MVT::f64:
2714 case MVT::v2i16:
2715 case MVT::v2i32:
2716 case MVT::v4i8:
2717 case MVT::v4i16:
2718 case MVT::v8i8:
2719 return isInt<4>(Count);
2720 // For HVX vectors the auto-inc is s3
2721 case MVT::v64i8:
2722 case MVT::v32i16:
2723 case MVT::v16i32:
2724 case MVT::v8i64:
2725 case MVT::v128i8:
2726 case MVT::v64i16:
2727 case MVT::v32i32:
2728 case MVT::v16i64:
2729 return isInt<3>(Count);
2730 default:
2731 break;
2732 }
2733
2734 llvm_unreachable("Not an valid type!")::llvm::llvm_unreachable_internal("Not an valid type!", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 2734)
;
2735}
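As a worked illustration of the check above: the offset must be a multiple of the access size, and the resulting element count must fit the signed auto-increment field (4 bits for scalars, 3 bits for HVX vectors). The following standalone sketch is not LLVM code; the helper names are invented and it only models the arithmetic.

#include <cstdio>

static bool fitsSignedBits(int Value, int Bits) {
  return Value >= -(1 << (Bits - 1)) && Value < (1 << (Bits - 1));
}

static bool isValidAutoIncImmModel(int AccessSizeBytes, int Offset, bool IsHVX) {
  if (Offset % AccessSizeBytes != 0)  // offset must be a whole number of elements
    return false;
  int Count = Offset / AccessSizeBytes;
  return fitsSignedBits(Count, IsHVX ? 3 : 4);
}

int main() {
  std::printf("%d\n", isValidAutoIncImmModel(4, 8, false));    // i32, +8: count 2 fits s4 -> 1
  std::printf("%d\n", isValidAutoIncImmModel(4, 40, false));   // i32, +40: count 10 exceeds s4 -> 0
  std::printf("%d\n", isValidAutoIncImmModel(64, -256, true)); // HVX 64B, -256: count -4 fits s3 -> 1
}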
2736
2737bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2738 const TargetRegisterInfo *TRI, bool Extend) const {
2739 // This function checks whether the "Offset" is in the valid range for
2740 // the given "Opcode". If "Offset" is out of range, an "A2_addi" is
2741 // inserted to calculate the final address, which is why the function
2742 // assumes that the "Offset" is correctly aligned.
2743 // We used to assert when the offset was not properly aligned; however,
2744 // there are cases where a misaligned pointer recast can cause this
2745 // problem, and we need to allow for it. The front end warns about such
2746 // misalignment with respect to the load size.
2747 switch (Opcode) {
2748 case Hexagon::PS_vstorerq_ai:
2749 case Hexagon::PS_vstorerv_ai:
2750 case Hexagon::PS_vstorerw_ai:
2751 case Hexagon::PS_vstorerw_nt_ai:
2752 case Hexagon::PS_vloadrq_ai:
2753 case Hexagon::PS_vloadrv_ai:
2754 case Hexagon::PS_vloadrw_ai:
2755 case Hexagon::PS_vloadrw_nt_ai:
2756 case Hexagon::V6_vL32b_ai:
2757 case Hexagon::V6_vS32b_ai:
2758 case Hexagon::V6_vS32b_qpred_ai:
2759 case Hexagon::V6_vS32b_nqpred_ai:
2760 case Hexagon::V6_vL32b_nt_ai:
2761 case Hexagon::V6_vS32b_nt_ai:
2762 case Hexagon::V6_vL32Ub_ai:
2763 case Hexagon::V6_vS32Ub_ai:
2764 case Hexagon::V6_vgathermh_pseudo:
2765 case Hexagon::V6_vgathermw_pseudo:
2766 case Hexagon::V6_vgathermhw_pseudo:
2767 case Hexagon::V6_vgathermhq_pseudo:
2768 case Hexagon::V6_vgathermwq_pseudo:
2769 case Hexagon::V6_vgathermhwq_pseudo: {
2770 unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
2771 assert(isPowerOf2_32(VectorSize));
2772 if (Offset & (VectorSize-1))
2773 return false;
2774 return isInt<4>(Offset >> Log2_32(VectorSize));
2775 }
2776
2777 case Hexagon::J2_loop0i:
2778 case Hexagon::J2_loop1i:
2779 return isUInt<10>(Offset);
2780
2781 case Hexagon::S4_storeirb_io:
2782 case Hexagon::S4_storeirbt_io:
2783 case Hexagon::S4_storeirbf_io:
2784 return isUInt<6>(Offset);
2785
2786 case Hexagon::S4_storeirh_io:
2787 case Hexagon::S4_storeirht_io:
2788 case Hexagon::S4_storeirhf_io:
2789 return isShiftedUInt<6,1>(Offset);
2790
2791 case Hexagon::S4_storeiri_io:
2792 case Hexagon::S4_storeirit_io:
2793 case Hexagon::S4_storeirif_io:
2794 return isShiftedUInt<6,2>(Offset);
2795 // Handle these two compare instructions that are not extendable.
2796 case Hexagon::A4_cmpbeqi:
2797 return isUInt<8>(Offset);
2798 case Hexagon::A4_cmpbgti:
2799 return isInt<8>(Offset);
2800 }
2801
2802 if (Extend)
2803 return true;
2804
2805 switch (Opcode) {
2806 case Hexagon::L2_loadri_io:
2807 case Hexagon::S2_storeri_io:
2808 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2809 (Offset <= Hexagon_MEMW_OFFSET_MAX);
2810
2811 case Hexagon::L2_loadrd_io:
2812 case Hexagon::S2_storerd_io:
2813 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2814 (Offset <= Hexagon_MEMD_OFFSET_MAX);
2815
2816 case Hexagon::L2_loadrh_io:
2817 case Hexagon::L2_loadruh_io:
2818 case Hexagon::S2_storerh_io:
2819 case Hexagon::S2_storerf_io:
2820 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2821 (Offset <= Hexagon_MEMH_OFFSET_MAX);
2822
2823 case Hexagon::L2_loadrb_io:
2824 case Hexagon::L2_loadrub_io:
2825 case Hexagon::S2_storerb_io:
2826 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2827 (Offset <= Hexagon_MEMB_OFFSET_MAX);
2828
2829 case Hexagon::A2_addi:
2830 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2831 (Offset <= Hexagon_ADDI_OFFSET_MAX);
2832
2833 case Hexagon::L4_iadd_memopw_io:
2834 case Hexagon::L4_isub_memopw_io:
2835 case Hexagon::L4_add_memopw_io:
2836 case Hexagon::L4_sub_memopw_io:
2837 case Hexagon::L4_iand_memopw_io:
2838 case Hexagon::L4_ior_memopw_io:
2839 case Hexagon::L4_and_memopw_io:
2840 case Hexagon::L4_or_memopw_io:
2841 return (0 <= Offset && Offset <= 255);
2842
2843 case Hexagon::L4_iadd_memoph_io:
2844 case Hexagon::L4_isub_memoph_io:
2845 case Hexagon::L4_add_memoph_io:
2846 case Hexagon::L4_sub_memoph_io:
2847 case Hexagon::L4_iand_memoph_io:
2848 case Hexagon::L4_ior_memoph_io:
2849 case Hexagon::L4_and_memoph_io:
2850 case Hexagon::L4_or_memoph_io:
2851 return (0 <= Offset && Offset <= 127);
2852
2853 case Hexagon::L4_iadd_memopb_io:
2854 case Hexagon::L4_isub_memopb_io:
2855 case Hexagon::L4_add_memopb_io:
2856 case Hexagon::L4_sub_memopb_io:
2857 case Hexagon::L4_iand_memopb_io:
2858 case Hexagon::L4_ior_memopb_io:
2859 case Hexagon::L4_and_memopb_io:
2860 case Hexagon::L4_or_memopb_io:
2861 return (0 <= Offset && Offset <= 63);
2862
2863 // LDriw_xxx and STriw_xxx are pseudo operations, so they have to accept an
2864 // offset of any size. A later pass knows how to handle it.
2865 case Hexagon::STriw_pred:
2866 case Hexagon::LDriw_pred:
2867 case Hexagon::STriw_ctr:
2868 case Hexagon::LDriw_ctr:
2869 return true;
2870
2871 case Hexagon::PS_fi:
2872 case Hexagon::PS_fia:
2873 case Hexagon::INLINEASM:
2874 return true;
2875
2876 case Hexagon::L2_ploadrbt_io:
2877 case Hexagon::L2_ploadrbf_io:
2878 case Hexagon::L2_ploadrubt_io:
2879 case Hexagon::L2_ploadrubf_io:
2880 case Hexagon::S2_pstorerbt_io:
2881 case Hexagon::S2_pstorerbf_io:
2882 return isUInt<6>(Offset);
2883
2884 case Hexagon::L2_ploadrht_io:
2885 case Hexagon::L2_ploadrhf_io:
2886 case Hexagon::L2_ploadruht_io:
2887 case Hexagon::L2_ploadruhf_io:
2888 case Hexagon::S2_pstorerht_io:
2889 case Hexagon::S2_pstorerhf_io:
2890 return isShiftedUInt<6,1>(Offset);
2891
2892 case Hexagon::L2_ploadrit_io:
2893 case Hexagon::L2_ploadrif_io:
2894 case Hexagon::S2_pstorerit_io:
2895 case Hexagon::S2_pstorerif_io:
2896 return isShiftedUInt<6,2>(Offset);
2897
2898 case Hexagon::L2_ploadrdt_io:
2899 case Hexagon::L2_ploadrdf_io:
2900 case Hexagon::S2_pstorerdt_io:
2901 case Hexagon::S2_pstorerdf_io:
2902 return isShiftedUInt<6,3>(Offset);
2903
2904 case Hexagon::L2_loadbsw2_io:
2905 case Hexagon::L2_loadbzw2_io:
2906 return isShiftedInt<11,1>(Offset);
2907
2908 case Hexagon::L2_loadbsw4_io:
2909 case Hexagon::L2_loadbzw4_io:
2910 return isShiftedInt<11,2>(Offset);
2911 } // switch
2912
2913 dbgs() << "Failed Opcode is : " << Opcode << " (" << getName(Opcode)
2914 << ")\n";
2915 llvm_unreachable("No offset range is defined for this opcode. "::llvm::llvm_unreachable_internal("No offset range is defined for this opcode. "
"Please define it in the above switch statement!", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 2916)
2916 "Please define it in the above switch statement!")::llvm::llvm_unreachable_internal("No offset range is defined for this opcode. "
"Please define it in the above switch statement!", "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp"
, 2916)
;
2917}
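Many of the ranges above are expressed with isShiftedUInt&lt;N,S&gt; / isShiftedInt&lt;N,S&gt; from llvm/Support/MathExtras.h: the offset must be a multiple of 2^S whose scaled value fits in N bits. Below is a hedged local re-implementation of the unsigned variant, for illustration only; the template name here is not the LLVM one.

#include <cassert>

template <unsigned N, unsigned S>
static bool isShiftedUIntModel(long long Offset) {
  if (Offset < 0 || (Offset & ((1LL << S) - 1)) != 0)
    return false;                     // negative or not aligned to 2^S
  return (Offset >> S) < (1LL << N);  // scaled value must fit in N bits
}

int main() {
  // S4_storeirh_io uses isShiftedUInt<6,1>: even offsets from 0 to 126.
  assert(isShiftedUIntModel<6, 1>(126));
  assert(!isShiftedUIntModel<6, 1>(127)); // odd offset: misaligned
  assert(!isShiftedUIntModel<6, 1>(128)); // 64 halfwords: out of range
}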
2918
2919bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
2920 return isHVXVec(MI) && isAccumulator(MI);
2921}
2922
2923bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
2924 const uint64_t F = get(MI.getOpcode()).TSFlags;
2925 const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2926 return
2927 V == HexagonII::TypeCVI_VA ||
2928 V == HexagonII::TypeCVI_VA_DV;
2929}
2930
2931bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr &ProdMI,
2932 const MachineInstr &ConsMI) const {
2933 if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2934 return true;
2935
2936 if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2937 return true;
2938
2939 if (mayBeNewStore(ConsMI))
2940 return true;
2941
2942 return false;
2943}
2944
2945bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
2946 switch (MI.getOpcode()) {
2947 // Byte
2948 case Hexagon::L2_loadrub_io:
2949 case Hexagon::L4_loadrub_ur:
2950 case Hexagon::L4_loadrub_ap:
2951 case Hexagon::L2_loadrub_pr:
2952 case Hexagon::L2_loadrub_pbr:
2953 case Hexagon::L2_loadrub_pi:
2954 case Hexagon::L2_loadrub_pci:
2955 case Hexagon::L2_loadrub_pcr:
2956 case Hexagon::L2_loadbzw2_io:
2957 case Hexagon::L4_loadbzw2_ur:
2958 case Hexagon::L4_loadbzw2_ap:
2959 case Hexagon::L2_loadbzw2_pr:
2960 case Hexagon::L2_loadbzw2_pbr:
2961 case Hexagon::L2_loadbzw2_pi:
2962 case Hexagon::L2_loadbzw2_pci:
2963 case Hexagon::L2_loadbzw2_pcr:
2964 case Hexagon::L2_loadbzw4_io:
2965 case Hexagon::L4_loadbzw4_ur:
2966 case Hexagon::L4_loadbzw4_ap:
2967 case Hexagon::L2_loadbzw4_pr:
2968 case Hexagon::L2_loadbzw4_pbr:
2969 case Hexagon::L2_loadbzw4_pi:
2970 case Hexagon::L2_loadbzw4_pci:
2971 case Hexagon::L2_loadbzw4_pcr:
2972 case Hexagon::L4_loadrub_rr:
2973 case Hexagon::L2_ploadrubt_io:
2974 case Hexagon::L2_ploadrubt_pi:
2975 case Hexagon::L2_ploadrubf_io:
2976 case Hexagon::L2_ploadrubf_pi:
2977 case Hexagon::L2_ploadrubtnew_io:
2978 case Hexagon::L2_ploadrubfnew_io:
2979 case Hexagon::L4_ploadrubt_rr:
2980 case Hexagon::L4_ploadrubf_rr:
2981 case Hexagon::L4_ploadrubtnew_rr:
2982 case Hexagon::L4_ploadrubfnew_rr:
2983 case Hexagon::L2_ploadrubtnew_pi:
2984 case Hexagon::L2_ploadrubfnew_pi:
2985 case Hexagon::L4_ploadrubt_abs:
2986 case Hexagon::L4_ploadrubf_abs:
2987 case Hexagon::L4_ploadrubtnew_abs:
2988 case Hexagon::L4_ploadrubfnew_abs:
2989 case Hexagon::L2_loadrubgp:
2990 // Half
2991 case Hexagon::L2_loadruh_io:
2992 case Hexagon::L4_loadruh_ur:
2993 case Hexagon::L4_loadruh_ap:
2994 case Hexagon::L2_loadruh_pr:
2995 case Hexagon::L2_loadruh_pbr:
2996 case Hexagon::L2_loadruh_pi:
2997 case Hexagon::L2_loadruh_pci:
2998 case Hexagon::L2_loadruh_pcr:
2999 case Hexagon::L4_loadruh_rr:
3000 case Hexagon::L2_ploadruht_io:
3001 case Hexagon::L2_ploadruht_pi:
3002 case Hexagon::L2_ploadruhf_io:
3003 case Hexagon::L2_ploadruhf_pi:
3004 case Hexagon::L2_ploadruhtnew_io:
3005 case Hexagon::L2_ploadruhfnew_io:
3006 case Hexagon::L4_ploadruht_rr:
3007 case Hexagon::L4_ploadruhf_rr:
3008 case Hexagon::L4_ploadruhtnew_rr:
3009 case Hexagon::L4_ploadruhfnew_rr:
3010 case Hexagon::L2_ploadruhtnew_pi:
3011 case Hexagon::L2_ploadruhfnew_pi:
3012 case Hexagon::L4_ploadruht_abs:
3013 case Hexagon::L4_ploadruhf_abs:
3014 case Hexagon::L4_ploadruhtnew_abs:
3015 case Hexagon::L4_ploadruhfnew_abs:
3016 case Hexagon::L2_loadruhgp:
3017 return true;
3018 default:
3019 return false;
3020 }
3021}
3022
3023// Add latency to instruction.
3024bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
3025 const MachineInstr &MI2) const {
3026 if (isHVXVec(MI1) && isHVXVec(MI2))
3027 if (!isVecUsableNextPacket(MI1, MI2))
3028 return true;
3029 return false;
3030}
3031
3032/// Get the base register and byte offset of a load/store instr.
3033bool HexagonInstrInfo::getMemOperandsWithOffsetWidth(
3034 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
3035 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3036 const TargetRegisterInfo *TRI) const {
3037 OffsetIsScalable = false;
3038 const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width);
3039 if (!BaseOp || !BaseOp->isReg())
3040 return false;
3041 BaseOps.push_back(BaseOp);
3042 return true;
3043}
3044
3045/// Can these instructions execute at the same time in a bundle.
3046bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr &First,
3047 const MachineInstr &Second) const {
3048 if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
3049 const MachineOperand &Op = Second.getOperand(0);
3050 if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
3051 return true;
3052 }
3053 if (DisableNVSchedule)
3054 return false;
3055 if (mayBeNewStore(Second)) {
3056 // Make sure the definition of the first instruction is the value being
3057 // stored.
3058 const MachineOperand &Stored =
3059 Second.getOperand(Second.getNumOperands() - 1);
3060 if (!Stored.isReg())
3061 return false;
3062 for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
3063 const MachineOperand &Op = First.getOperand(i);
3064 if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
3065 return true;
3066 }
3067 }
3068 return false;
3069}
3070
3071bool HexagonInstrInfo::doesNotReturn(const MachineInstr &CallMI) const {
3072 unsigned Opc = CallMI.getOpcode();
3073 return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
3074}
3075
3076bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const {
3077 for (auto &I : *B)
3078 if (I.isEHLabel())
3079 return true;
3080 return false;
3081}
3082
3083// Returns true if an instruction can be converted into a non-extended
3084// equivalent instruction.
3085bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr &MI) const {
3086 short NonExtOpcode;
3087 // Check if the instruction has a register form that uses a register in place
3088 // of the extended operand; if so, return that as the non-extended form.
3089 if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
3090 return true;
3091
3092 if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
3093 // Check addressing mode and retrieve non-ext equivalent instruction.
3094
3095 switch (getAddrMode(MI)) {
3096 case HexagonII::Absolute:
3097 // Load/store with absolute addressing mode can be converted into
3098 // base+offset mode.
3099 NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
3100 break;
3101 case HexagonII::BaseImmOffset:
3102 // Load/store with base+offset addressing mode can be converted into
3103 // base+register offset addressing mode. However, the left shift operand should
3104 // be set to 0.
3105 NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
3106 break;
3107 case HexagonII::BaseLongOffset:
3108 NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
3109 break;
3110 default:
3111 return false;
3112 }
3113 if (NonExtOpcode < 0)
3114 return false;
3115 return true;
3116 }
3117 return false;
3118}
3119
3120bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr &MI) const {
3121 return Hexagon::getRealHWInstr(MI.getOpcode(),
3122 Hexagon::InstrType_Pseudo) >= 0;
3123}
3124
3125bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B)
3126 const {
3127 MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end();
3128 while (I != E) {
3129 if (I->isBarrier())
3130 return true;
3131 ++I;
3132 }
3133 return false;
3134}
3135
3136 // Returns true if an LD insn can be promoted to a cur load.
3137bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr &MI) const {
3138 const uint64_t F = MI.getDesc().TSFlags;
3139 return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
3140 Subtarget.hasV60Ops();
3141}
3142
3143 // Returns true if an ST insn can be promoted to a new-value store.
3144bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
3145 if (MI.mayStore() && !Subtarget.useNewValueStores())
3146 return false;
3147
3148 const uint64_t F = MI.getDesc().TSFlags;
3149 return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
3150}
3151
3152bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
3153 const MachineInstr &ConsMI) const {
3154 // There is no stall when ProdMI is not a V60 vector.
3155 if (!isHVXVec(ProdMI))
3156 return false;
3157
3158 // There is no stall when ProdMI and ConsMI are not dependent.
3159 if (!isDependent(ProdMI, ConsMI))
3160 return false;
3161
3162 // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
3163 // are scheduled in consecutive packets.
3164 if (isVecUsableNextPacket(ProdMI, ConsMI))
3165 return false;
3166
3167 return true;
3168}
3169
3170bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
3171 MachineBasicBlock::const_instr_iterator BII) const {
3172 // There is no stall when MI is not a V60 vector.
3173 if (!isHVXVec(MI))
3174 return false;
3175
3176 MachineBasicBlock::const_instr_iterator MII = BII;
3177 MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
3178
3179 if (!MII->isBundle())
3180 return producesStall(*MII, MI);
3181
3182 for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3183 const MachineInstr &J = *MII;
3184 if (producesStall(J, MI))
3185 return true;
3186 }
3187 return false;
3188}
3189
3190bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr &MI,
3191 unsigned PredReg) const {
3192 for (const MachineOperand &MO : MI.operands()) {
3193 // Predicate register must be explicitly defined.
3194 if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
3195 return false;
3196 if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3197 return false;
3198 }
3199
3200 // Instructions that produce a late predicate cannot be used as sources of
3201 // dot-new.
3202 switch (MI.getOpcode()) {
3203 case Hexagon::A4_addp_c:
3204 case Hexagon::A4_subp_c:
3205 case Hexagon::A4_tlbmatch:
3206 case Hexagon::A5_ACS:
3207 case Hexagon::F2_sfinvsqrta:
3208 case Hexagon::F2_sfrecipa:
3209 case Hexagon::J2_endloop0:
3210 case Hexagon::J2_endloop01:
3211 case Hexagon::J2_ploop1si:
3212 case Hexagon::J2_ploop1sr:
3213 case Hexagon::J2_ploop2si:
3214 case Hexagon::J2_ploop2sr:
3215 case Hexagon::J2_ploop3si:
3216 case Hexagon::J2_ploop3sr:
3217 case Hexagon::S2_cabacdecbin:
3218 case Hexagon::S2_storew_locked:
3219 case Hexagon::S4_stored_locked:
3220 return false;
3221 }
3222 return true;
3223}
3224
3225bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3226 return Opcode == Hexagon::J2_jumpt ||
3227 Opcode == Hexagon::J2_jumptpt ||
3228 Opcode == Hexagon::J2_jumpf ||
3229 Opcode == Hexagon::J2_jumpfpt ||
3230 Opcode == Hexagon::J2_jumptnew ||
3231 Opcode == Hexagon::J2_jumpfnew ||
3232 Opcode == Hexagon::J2_jumptnewpt ||
3233 Opcode == Hexagon::J2_jumpfnewpt;
3234}
3235
3236bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
3237 if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3238 return false;
3239 return !isPredicatedTrue(Cond[0].getImm());
3240}
3241
3242unsigned HexagonInstrInfo::getAddrMode(const MachineInstr &MI) const {
3243 const uint64_t F = MI.getDesc().TSFlags;
3244 return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
3245}
3246
3247// Returns the base register in a memory access (load/store). The offset is
3248// returned in Offset and the access size is returned in AccessSize.
3249// If the base operand has a subregister or the offset field does not contain
3250// an immediate value, return nullptr.
3251MachineOperand *HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
3252 int64_t &Offset,
3253 unsigned &AccessSize) const {
3254 // Return if it is not a base+offset type instruction or a MemOp.
3255 if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
3256 getAddrMode(MI) != HexagonII::BaseLongOffset &&
3257 !isMemOp(MI) && !isPostIncrement(MI))
3258 return nullptr;
3259
3260 AccessSize = getMemAccessSize(MI);
3261
3262 unsigned BasePos = 0, OffsetPos = 0;
3263 if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
3264 return nullptr;
3265
3266 // Post increment updates its EA after the mem access,
3267 // so we need to treat its offset as zero.
3268 if (isPostIncrement(MI)) {
3269 Offset = 0;
3270 } else {
3271 const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
3272 if (!OffsetOp.isImm())
3273 return nullptr;
3274 Offset = OffsetOp.getImm();
3275 }
3276
3277 const MachineOperand &BaseOp = MI.getOperand(BasePos);
3278 if (BaseOp.getSubReg() != 0)
3279 return nullptr;
3280 return &const_cast<MachineOperand&>(BaseOp);
3281}
3282
3283/// Return the position of the base and offset operands for this instruction.
3284bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr &MI,
3285 unsigned &BasePos, unsigned &OffsetPos) const {
3286 if (!isAddrModeWithOffset(MI) && !isPostIncrement(MI))
3287 return false;
3288
3289 // Deal with memops first.
3290 if (isMemOp(MI)) {
3291 BasePos = 0;
3292 OffsetPos = 1;
3293 } else if (MI.mayStore()) {
3294 BasePos = 0;
3295 OffsetPos = 1;
3296 } else if (MI.mayLoad()) {
3297 BasePos = 1;
3298 OffsetPos = 2;
3299 } else
3300 return false;
3301
3302 if (isPredicated(MI)) {
3303 BasePos++;
3304 OffsetPos++;
3305 }
3306 if (isPostIncrement(MI)) {
3307 BasePos++;
3308 OffsetPos++;
3309 }
3310
3311 if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3312 return false;
3313
3314 return true;
3315}
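To make the index bookkeeping above concrete, here is a hedged toy model (not LLVM code) of how the base/offset positions shift: start from the layout of the unpredicated, non-post-increment form and move both indices right by one for each extra leading operand. The operand layouts assumed in the comments are illustrative only.

#include <cstdio>

struct OperandPositions { unsigned BasePos, OffsetPos; };

static OperandPositions storePositionsModel(bool Predicated, bool PostIncrement) {
  OperandPositions P{0, 1};            // plain store: {base, offset, value}
  if (Predicated) { ++P.BasePos; ++P.OffsetPos; }    // leading predicate operand
  if (PostIncrement) { ++P.BasePos; ++P.OffsetPos; } // leading updated-base def
  return P;
}

int main() {
  OperandPositions P = storePositionsModel(/*Predicated=*/true, /*PostIncrement=*/false);
  std::printf("base=%u offset=%u\n", P.BasePos, P.OffsetPos); // base=1 offset=2
}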
3316
3317 // Returns branching instructions in reverse order of their occurrence.
3318// e.g. jump_t t1 (i1)
3319// jump t2 (i2)
3320// Jumpers = {i2, i1}
3321SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
3322 MachineBasicBlock& MBB) const {
3323 SmallVector<MachineInstr*, 2> Jumpers;
3324 // If the block has no terminators, it just falls into the block after it.
3325 MachineBasicBlock::instr_iterator I = MBB.instr_end();
3326 if (I == MBB.instr_begin())
3327 return Jumpers;
3328
3329 // A basic block may look like this:
3330 //
3331 // [ insn
3332 // EH_LABEL
3333 // insn
3334 // insn
3335 // insn
3336 // EH_LABEL
3337 // insn ]
3338 //
3339 // It has two successors but does not have a terminator.
3340 // We don't know how to handle such a block.
3341 do {
3342 --I;
3343 if (I->isEHLabel())
3344 return Jumpers;
3345 } while (I != MBB.instr_begin());
3346
3347 I = MBB.instr_end();
3348 --I;
3349
3350 while (I->isDebugInstr()) {
3351 if (I == MBB.instr_begin())
3352 return Jumpers;
3353 --I;
3354 }
3355 if (!isUnpredicatedTerminator(*I))
3356 return Jumpers;
3357
3358 // Get the last instruction in the block.
3359 MachineInstr *LastInst = &*I;
3360 Jumpers.push_back(LastInst);
3361 MachineInstr *SecondLastInst = nullptr;
3362 // Find one more terminator if present.
3363 do {
3364 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3365 if (!SecondLastInst) {
3366 SecondLastInst = &*I;
3367 Jumpers.push_back(SecondLastInst);
3368 } else // This is a third branch.
3369 return Jumpers;
3370 }
3371 if (I == MBB.instr_begin())
3372 break;
3373 --I;
3374 } while (true);
3375 return Jumpers;
3376}
3377
3378// Returns Operand Index for the constant extended instruction.
3379unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr &MI) const {
3380 const uint64_t F = MI.getDesc().TSFlags;
3381 return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
3382}
3383
3384 // See if instruction could potentially be a compound candidate.
3385// If so, return its group. Zero otherwise.
3386HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
3387 const MachineInstr &MI) const {
3388 unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3389
3390 switch (MI.getOpcode()) {
3391 default:
3392 return HexagonII::HCG_None;
3393 //
3394 // Compound pairs.
3395 // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3396 // "Rd16=#U6 ; jump #r9:2"
3397 // "Rd16=Rs16 ; jump #r9:2"
3398 //
3399 case Hexagon::C2_cmpeq:
3400 case Hexagon::C2_cmpgt:
3401 case Hexagon::C2_cmpgtu:
3402 DstReg = MI.getOperand(0).getReg();
3403 Src1Reg = MI.getOperand(1).getReg();
3404 Src2Reg = MI.getOperand(2).getReg();
3405 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3406 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3407 isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3408 return HexagonII::HCG_A;
3409 break;
3410 case Hexagon::C2_cmpeqi:
3411 case Hexagon::C2_cmpgti:
3412 case Hexagon::C2_cmpgtui:
3413 // P0 = cmp.eq(Rs,#u2)
3414 DstReg = MI.getOperand(0).getReg();
3415 SrcReg = MI.getOperand(1).getReg();
3416 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3417 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3418 isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3419 ((isUInt<5>(MI.getOperand(2).getImm())) ||
3420 (MI.getOperand(2).getImm() == -1)))
3421 return HexagonII::HCG_A;
3422 break;
3423 case Hexagon::A2_tfr:
3424 // Rd = Rs
3425 DstReg = MI.getOperand(0).getReg();
3426 SrcReg = MI.getOperand(1).getReg();
3427 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3428 return HexagonII::HCG_A;
3429 break;
3430 case Hexagon::A2_tfrsi:
3431 // Rd = #u6
3432 // Do not test for #u6 size since the const is getting extended
3433 // regardless and compound could be formed.
3434 DstReg = MI.getOperand(0).getReg();
3435 if (isIntRegForSubInst(DstReg))
3436 return HexagonII::HCG_A;
3437 break;
3438 case Hexagon::S2_tstbit_i:
3439 DstReg = MI.getOperand(0).getReg();
3440 Src1Reg = MI.getOperand(1).getReg();
3441 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3442 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3443 MI.getOperand(2).isImm() &&
3444 isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3445 return HexagonII::HCG_A;
3446 break;
3447 // The fact that .new form is used pretty much guarantees
3448 // that predicate register will match. Nevertheless,
3449 // there could be some false positives without additional
3450 // checking.
3451 case Hexagon::J2_jumptnew:
3452 case Hexagon::J2_jumpfnew:
3453 case Hexagon::J2_jumptnewpt:
3454 case Hexagon::J2_jumpfnewpt:
3455 Src1Reg = MI.getOperand(0).getReg();
3456 if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3457 (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3458 return HexagonII::HCG_B;
3459 break;
3460 // Transfer and jump:
3461 // Rd=#U6 ; jump #r9:2
3462 // Rd=Rs ; jump #r9:2
3463 // Do not test for jump range here.
3464 case Hexagon::J2_jump:
3465 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3466 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3467 return HexagonII::HCG_C;
3468 }
3469
3470 return HexagonII::HCG_None;
3471}
3472
3473// Returns -1 when there is no opcode found.
3474unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr &GA,
3475 const MachineInstr &GB) const {
3476 assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
3477 assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
3478 if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3479 (GB.getOpcode() != Hexagon::J2_jumptnew))
3480 return -1u;
3481 Register DestReg = GA.getOperand(0).getReg();
3482 if (!GB.readsRegister(DestReg))
3483 return -1u;
3484 if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
3485 return -1u;
3486 // The value compared against must be either u5 or -1.
3487 const MachineOperand &CmpOp = GA.getOperand(2);
3488 if (!CmpOp.isImm())
3489 return -1u;
3490 int V = CmpOp.getImm();
3491 if (V == -1)
3492 return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
3493 : Hexagon::J4_cmpeqn1_tp1_jump_nt;
3494 if (!isUInt<5>(V))
3495 return -1u;
3496 return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
3497 : Hexagon::J4_cmpeqi_tp1_jump_nt;
3498}
3499
3500// Returns -1 if there is no opcode found.
3501int HexagonInstrInfo::getDuplexOpcode(const MachineInstr &MI,
3502 bool ForBigCore) const {
3503 // Static table to switch the opcodes across Tiny Core and Big Core.
3504 // dup_ opcodes are Big core opcodes.
3505 // NOTE: There are special instructions that need to be handled later.
3506 // L4_return* instructions will only occupy SLOT0 (on big core too).
3507 // PS_jmpret - This pseudo translates to J2_jumpr, which occupies only SLOT2.
3508 // The compiler needs to map the root instruction to L6_return_map_to_raw,
3509 // which can go in any slot.
3510 static const std::map<unsigned, unsigned> DupMap = {
3511 {Hexagon::A2_add, Hexagon::dup_A2_add},
3512 {Hexagon::A2_addi, Hexagon::dup_A2_addi},
3513 {Hexagon::A2_andir, Hexagon::dup_A2_andir},
3514 {Hexagon::A2_combineii, Hexagon::dup_A2_combineii},
3515 {Hexagon::A2_sxtb, Hexagon::dup_A2_sxtb},
3516 {Hexagon::A2_sxth, Hexagon::dup_A2_sxth},
3517 {Hexagon::A2_tfr, Hexagon::dup_A2_tfr},
3518 {Hexagon::A2_tfrsi, Hexagon::dup_A2_tfrsi},
3519 {Hexagon::A2_zxtb, Hexagon::dup_A2_zxtb},
3520 {Hexagon::A2_zxth, Hexagon::dup_A2_zxth},
3521 {Hexagon::A4_combineii, Hexagon::dup_A4_combineii},
3522 {Hexagon::A4_combineir, Hexagon::dup_A4_combineir},
3523 {Hexagon::A4_combineri, Hexagon::dup_A4_combineri},
3524 {Hexagon::C2_cmoveif, Hexagon::dup_C2_cmoveif},
3525 {Hexagon::C2_cmoveit, Hexagon::dup_C2_cmoveit},
3526 {Hexagon::C2_cmovenewif, Hexagon::dup_C2_cmovenewif},
3527 {Hexagon::C2_cmovenewit, Hexagon::dup_C2_cmovenewit},
3528 {Hexagon::C2_cmpeqi, Hexagon::dup_C2_cmpeqi},
3529 {Hexagon::L2_deallocframe, Hexagon::dup_L2_deallocframe},
3530 {Hexagon::L2_loadrb_io, Hexagon::dup_L2_loadrb_io},
3531 {Hexagon::L2_loadrd_io, Hexagon::dup_L2_loadrd_io},
3532 {Hexagon::L2_loadrh_io, Hexagon::dup_L2_loadrh_io},
3533 {Hexagon::L2_loadri_io, Hexagon::dup_L2_loadri_io},
3534 {Hexagon::L2_loadrub_io, Hexagon::dup_L2_loadrub_io},
3535 {Hexagon::L2_loadruh_io, Hexagon::dup_L2_loadruh_io},
3536 {Hexagon::S2_allocframe, Hexagon::dup_S2_allocframe},
3537 {Hexagon::S2_storerb_io, Hexagon::dup_S2_storerb_io},
3538 {Hexagon::S2_storerd_io, Hexagon::dup_S2_storerd_io},
3539 {Hexagon::S2_storerh_io, Hexagon::dup_S2_storerh_io},
3540 {Hexagon::S2_storeri_io, Hexagon::dup_S2_storeri_io},
3541 {Hexagon::S4_storeirb_io, Hexagon::dup_S4_storeirb_io},
3542 {Hexagon::S4_storeiri_io, Hexagon::dup_S4_storeiri_io},
3543 };
3544 unsigned OpNum = MI.getOpcode();
3545 // Conversion to Big core.
3546 if (ForBigCore) {
3547 auto Iter = DupMap.find(OpNum);
3548 if (Iter != DupMap.end())
3549 return Iter->second;
3550 } else { // Conversion to Tiny core.
3551 for (const auto &Iter : DupMap)
3552 if (Iter.second == OpNum)
3553 return Iter.first;
3554 }
3555 return -1;
3556}
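The table above is consulted in both directions: a direct find() when converting to the big-core (dup_) opcode, and a linear scan over the mapped values for the reverse, tiny-core direction. A self-contained sketch of that lookup pattern follows, using placeholder numbers instead of real Hexagon opcodes.

#include <cstdio>
#include <map>

static int duplexLookupModel(const std::map<unsigned, unsigned> &DupMap,
                             unsigned Op, bool ForBigCore) {
  if (ForBigCore) {                   // forward direction: key -> value
    auto It = DupMap.find(Op);
    return It != DupMap.end() ? static_cast<int>(It->second) : -1;
  }
  for (const auto &Entry : DupMap)    // reverse direction: search mapped values
    if (Entry.second == Op)
      return static_cast<int>(Entry.first);
  return -1;
}

int main() {
  std::map<unsigned, unsigned> DupMap = {{10, 110}, {20, 120}}; // fake opcodes
  std::printf("%d %d %d\n",
              duplexLookupModel(DupMap, 10, true),   // 110
              duplexLookupModel(DupMap, 120, false), // 20
              duplexLookupModel(DupMap, 99, true));  // -1
}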
3557
3558int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3559 enum Hexagon::PredSense inPredSense;
3560 inPredSense = invertPredicate ? Hexagon::PredSense_false :
3561 Hexagon::PredSense_true;
3562 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3563 if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3564 return CondOpcode;
3565
3566 llvm_unreachable("Unexpected predicable instruction")::llvm::llvm_unreachable_internal("Unexpected predicable instruction"
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 3566)
;
3567}
3568
3569 // Return the cur value instruction for a given load.
3570int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
3571 switch (MI.getOpcode()) {
3572 default: llvm_unreachable("Unknown .cur type");
3573 case Hexagon::V6_vL32b_pi:
3574 return Hexagon::V6_vL32b_cur_pi;
3575 case Hexagon::V6_vL32b_ai:
3576 return Hexagon::V6_vL32b_cur_ai;
3577 case Hexagon::V6_vL32b_nt_pi:
3578 return Hexagon::V6_vL32b_nt_cur_pi;
3579 case Hexagon::V6_vL32b_nt_ai:
3580 return Hexagon::V6_vL32b_nt_cur_ai;
3581 case Hexagon::V6_vL32b_ppu:
3582 return Hexagon::V6_vL32b_cur_ppu;
3583 case Hexagon::V6_vL32b_nt_ppu:
3584 return Hexagon::V6_vL32b_nt_cur_ppu;
3585 }
3586 return 0;
3587}
3588
3589// Return the regular version of the .cur instruction.
3590int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
3591 switch (MI.getOpcode()) {
3592 default: llvm_unreachable("Unknown .cur type");
3593 case Hexagon::V6_vL32b_cur_pi:
3594 return Hexagon::V6_vL32b_pi;
3595 case Hexagon::V6_vL32b_cur_ai:
3596 return Hexagon::V6_vL32b_ai;
3597 case Hexagon::V6_vL32b_nt_cur_pi:
3598 return Hexagon::V6_vL32b_nt_pi;
3599 case Hexagon::V6_vL32b_nt_cur_ai:
3600 return Hexagon::V6_vL32b_nt_ai;
3601 case Hexagon::V6_vL32b_cur_ppu:
3602 return Hexagon::V6_vL32b_ppu;
3603 case Hexagon::V6_vL32b_nt_cur_ppu:
3604 return Hexagon::V6_vL32b_nt_ppu;
3605 }
3606 return 0;
3607}
3608
3609// The diagram below shows the steps involved in the conversion of a predicated
3610// store instruction to its .new predicated new-value form.
3611//
3612// Note: It doesn't include conditional new-value stores as they can't be
3613// converted to .new predicate.
3614//
3615// p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3616// ^ ^
3617// / \ (not OK. it will cause new-value store to be
3618// / X conditional on p0.new while R2 producer is
3619// / \ on p0)
3620// / \.
3621// p.new store p.old NV store
3622// [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
3623// ^ ^
3624// \ /
3625// \ /
3626// \ /
3627// p.old store
3628// [if (p0)memw(R0+#0)=R2]
3629//
3630// The following set of instructions further explains the scenario where
3631// conditional new-value store becomes invalid when promoted to .new predicate
3632// form.
3633//
3634// { 1) if (p0) r0 = add(r1, r2)
3635// 2) p0 = cmp.eq(r3, #0) }
3636//
3637// 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3638// the first two instructions because in instr 1, r0 is conditional on old value
3639// of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3640// is not valid for new-value stores.
3641// Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3642// from the "Conditional Store" list. Because a predicated new value store
3643// would NOT be promoted to a double dot new store. See diagram below:
3644// This function returns yes for those stores that are predicated but not
3645// yet promoted to predicate dot new instructions.
3646//
3647// +---------------------+
3648// /-----| if (p0) memw(..)=r0 |---------\~
3649// || +---------------------+ ||
3650// promote || /\ /\ || promote
3651// || /||\ /||\ ||
3652// \||/ demote || \||/
3653// \/ || || \/
3654// +-------------------------+ || +-------------------------+
3655// | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3656// +-------------------------+ || +-------------------------+
3657// || || ||
3658// || demote \||/
3659// promote || \/ NOT possible
3660// || || /\~
3661// \||/ || /||\~
3662// \/ || ||
3663// +-----------------------------+
3664// | if (p0.new) memw(..)=r0.new |
3665// +-----------------------------+
3666// Double Dot New Store
3667//
3668// Returns the most basic instruction for the .new predicated instructions and
3669// new-value stores.
3670// For example, all of the following instructions will be converted back to the
3671// same instruction:
3672// 1) if (p0.new) memw(R0+#0) = R1.new --->
3673// 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3674// 3) if (p0.new) memw(R0+#0) = R1 --->
3675//
3676// To understand the translation of instruction 1 to its original form, consider
3677// a packet with 3 instructions.
3678// { p0 = cmp.eq(R0,R1)
3679// if (p0.new) R2 = add(R3, R4)
3680// R5 = add (R3, R1)
3681// }
3682// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3683//
3684// This instruction can be part of the previous packet only if both p0 and R2
3685// are promoted to .new values. This promotion happens in steps, first
3686// predicate register is promoted to .new and in the next iteration R2 is
3687// promoted. Therefore, in case of dependence check failure (due to R5) during
3688// next iteration, it should be converted back to its most basic form.
3689
3690// Return the new value instruction for a given store.
3691int HexagonInstrInfo::getDotNewOp(const MachineInstr &MI) const {
3692 int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3693 if (NVOpcode >= 0) // Valid new-value store instruction.
3694 return NVOpcode;
3695
3696 switch (MI.getOpcode()) {
3697 default:
3698 report_fatal_error(Twine("Unknown .new type: ") +
3699 std::to_string(MI.getOpcode()));
3700 case Hexagon::S4_storerb_ur:
3701 return Hexagon::S4_storerbnew_ur;
3702
3703 case Hexagon::S2_storerb_pci:
3704 return Hexagon::S2_storerb_pci;
3705
3706 case Hexagon::S2_storeri_pci:
3707 return Hexagon::S2_storeri_pci;
3708
3709 case Hexagon::S2_storerh_pci:
3710 return Hexagon::S2_storerh_pci;
3711
3712 case Hexagon::S2_storerd_pci:
3713 return Hexagon::S2_storerd_pci;
3714
3715 case Hexagon::S2_storerf_pci:
3716 return Hexagon::S2_storerf_pci;
3717
3718 case Hexagon::V6_vS32b_ai:
3719 return Hexagon::V6_vS32b_new_ai;
3720
3721 case Hexagon::V6_vS32b_pi:
3722 return Hexagon::V6_vS32b_new_pi;
3723 }
3724 return 0;
3725}
3726
3727// Returns the opcode to use when converting MI, which is a conditional jump,
3728// into a conditional instruction which uses the .new value of the predicate.
3729// We also use branch probabilities to add a hint to the jump.
3730// If MBPI is null, all edges will be treated as equally likely for the
3731// purposes of establishing a predication hint.
3732int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr &MI,
3733 const MachineBranchProbabilityInfo *MBPI) const {
3734 // We assume that block can have at most two successors.
3735 const MachineBasicBlock *Src = MI.getParent();
3736 const MachineOperand &BrTarget = MI.getOperand(1);
3737 bool Taken = false;
3738 const BranchProbability OneHalf(1, 2);
3739
3740 auto getEdgeProbability = [MBPI] (const MachineBasicBlock *Src,
3741 const MachineBasicBlock *Dst) {
3742 if (MBPI)
3743 return MBPI->getEdgeProbability(Src, Dst);
3744 return BranchProbability(1, Src->succ_size());
3745 };
3746
3747 if (BrTarget.isMBB()) {
3748 const MachineBasicBlock *Dst = BrTarget.getMBB();
3749 Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3750 } else {
3751 // The branch target is not a basic block (most likely a function).
3752 // Since BPI only gives probabilities for targets that are basic blocks,
3753 // try to identify another target of this branch (potentially a fall-
3754 // -through) and check the probability of that target.
3755 //
3756 // The only handled branch combinations are:
3757 // - one conditional branch,
3758 // - one conditional branch followed by one unconditional branch.
3759 // Otherwise, assume not-taken.
3760 assert(MI.isConditionalBranch());
3761 const MachineBasicBlock &B = *MI.getParent();
3762 bool SawCond = false, Bad = false;
3763 for (const MachineInstr &I : B) {
3764 if (!I.isBranch())
3765 continue;
3766 if (I.isConditionalBranch()) {
3767 SawCond = true;
3768 if (&I != &MI) {
3769 Bad = true;
3770 break;
3771 }
3772 }
3773 if (I.isUnconditionalBranch() && !SawCond) {
3774 Bad = true;
3775 break;
3776 }
3777 }
3778 if (!Bad) {
3779 MachineBasicBlock::const_instr_iterator It(MI);
3780 MachineBasicBlock::const_instr_iterator NextIt = std::next(It);
3781 if (NextIt == B.instr_end()) {
3782 // If this branch is the last, look for the fall-through block.
3783 for (const MachineBasicBlock *SB : B.successors()) {
3784 if (!B.isLayoutSuccessor(SB))
3785 continue;
3786 Taken = getEdgeProbability(Src, SB) < OneHalf;
3787 break;
3788 }
3789 } else {
3790 assert(NextIt->isUnconditionalBranch());
3791 // Find the first MBB operand and assume it's the target.
3792 const MachineBasicBlock *BT = nullptr;
3793 for (const MachineOperand &Op : NextIt->operands()) {
3794 if (!Op.isMBB())
3795 continue;
3796 BT = Op.getMBB();
3797 break;
3798 }
3799 Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3800 }
3801 } // if (!Bad)
3802 }
3803
3804 // The Taken flag should be set to something reasonable by this point.
3805
3806 switch (MI.getOpcode()) {
3807 case Hexagon::J2_jumpt:
3808 return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3809 case Hexagon::J2_jumpf:
3810 return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3811
3812 default:
3813 llvm_unreachable("Unexpected jump instruction.")::llvm::llvm_unreachable_internal("Unexpected jump instruction."
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 3813)
;
3814 }
3815}
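Summarizing the decision above as a hedged standalone sketch: once the edge to the branch target is judged at least 50% likely, the ':t' (taken-hint) form of the .new jump is chosen, otherwise the not-taken form. The strings below mirror the opcode names in the switch, but the function itself is only an illustration, not the real opcode mapping.

#include <cstdio>

static const char *pickDotNewJumpModel(bool JumpOnTrue, double TakenProbability) {
  bool Taken = TakenProbability >= 0.5;
  if (JumpOnTrue)
    return Taken ? "J2_jumptnewpt" : "J2_jumptnew";
  return Taken ? "J2_jumpfnewpt" : "J2_jumpfnew";
}

int main() {
  std::printf("%s\n", pickDotNewJumpModel(true, 0.8));  // J2_jumptnewpt
  std::printf("%s\n", pickDotNewJumpModel(false, 0.1)); // J2_jumpfnew
}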
3816
3817// Return .new predicate version for an instruction.
3818int HexagonInstrInfo::getDotNewPredOp(const MachineInstr &MI,
3819 const MachineBranchProbabilityInfo *MBPI) const {
3820 switch (MI.getOpcode()) {
3821 // Conditional Jumps
3822 case Hexagon::J2_jumpt:
3823 case Hexagon::J2_jumpf:
3824 return getDotNewPredJumpOp(MI, MBPI);
3825 }
3826
3827 int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3828 if (NewOpcode >= 0)
3829 return NewOpcode;
3830 return 0;
3831}
3832
3833int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const {
3834 int NewOp = MI.getOpcode();
3835 if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3836 NewOp = Hexagon::getPredOldOpcode(NewOp);
3837 // All Hexagon architectures have prediction bits on dot-new branches,
3838 // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
3839 // to pick the right opcode when converting back to dot-old.
3840 if (!Subtarget.getFeatureBits()[Hexagon::ArchV60]) {
3841 switch (NewOp) {
3842 case Hexagon::J2_jumptpt:
3843 NewOp = Hexagon::J2_jumpt;
3844 break;
3845 case Hexagon::J2_jumpfpt:
3846 NewOp = Hexagon::J2_jumpf;
3847 break;
3848 case Hexagon::J2_jumprtpt:
3849 NewOp = Hexagon::J2_jumprt;
3850 break;
3851 case Hexagon::J2_jumprfpt:
3852 NewOp = Hexagon::J2_jumprf;
3853 break;
3854 }
3855 }
3856 assert(NewOp >= 0 &&
3857 "Couldn't change predicate new instruction to its old form.");
3858 }
3859
3860 if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3861 NewOp = Hexagon::getNonNVStore(NewOp);
3862 assert(NewOp >= 0 && "Couldn't change new-value store to its old form.")(static_cast <bool> (NewOp >= 0 && "Couldn't change new-value store to its old form."
) ? void (0) : __assert_fail ("NewOp >= 0 && \"Couldn't change new-value store to its old form.\""
, "llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp", 3862, __extension__
__PRETTY_FUNCTION__))
;
3863 }
3864
3865 if (Subtarget.hasV60Ops())
3866 return NewOp;
3867
3868 // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps.
3869 switch (NewOp) {
3870 case Hexagon::J2_jumpfpt:
3871 return Hexagon::J2_jumpf;
3872 case Hexagon::J2_jumptpt:
3873 return Hexagon::J2_jumpt;
3874 case Hexagon::J2_jumprfpt:
3875 return Hexagon::J2_jumprf;
3876 case Hexagon::J2_jumprtpt:
3877 return Hexagon::J2_jumprt;
3878 }
3879 return NewOp;
3880}
3881
3882// See if instruction could potentially be a duplex candidate.
3883// If so, return its group. Zero otherwise.
3884HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3885 const MachineInstr &MI) const {
3886 unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3887 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3888
3889 switch (MI.getOpcode()) {
3890 default:
3891 return HexagonII::HSIG_None;
3892 //
3893 // Group L1:
3894 //
3895 // Rd = memw(Rs+#u4:2)
3896 // Rd = memub(Rs+#u4:0)
3897 case Hexagon::L2_loadri_io:
3898 case Hexagon::dup_L2_loadri_io:
3899 DstReg = MI.getOperand(0).getReg();
3900 SrcReg = MI.getOperand(1).getReg();
3901 // Special case this one from Group L2.
3902 // Rd = memw(r29+#u5:2)
3903 if (isIntRegForSubInst(DstReg)) {
3904 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3905 HRI.getStackRegister() == SrcReg &&
3906 MI.getOperand(2).isImm() &&
3907 isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3908 return HexagonII::HSIG_L2;
3909 // Rd = memw(Rs+#u4:2)
3910 if (isIntRegForSubInst(SrcReg) &&
3911 (MI.getOperand(2).isImm() &&
3912 isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3913 return HexagonII::HSIG_L1;
3914 }
3915 break;
3916 case Hexagon::L2_loadrub_io:
3917 case Hexagon::dup_L2_loadrub_io:
3918 // Rd = memub(Rs+#u4:0)
3919 DstReg = MI.getOperand(0).getReg();
3920 SrcReg = MI.getOperand(1).getReg();
3921 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3922 MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3923 return HexagonII::HSIG_L1;
3924 break;
3925 //
3926 // Group L2:
3927 //
3928 // Rd = memh/memuh(Rs+#u3:1)
3929 // Rd = memb(Rs+#u3:0)
3930 // Rd = memw(r29+#u5:2) - Handled above.
3931 // Rdd = memd(r29+#u5:3)
3932 // deallocframe
3933 // [if ([!]p0[.new])] dealloc_return
3934 // [if ([!]p0[.new])] jumpr r31
3935 case Hexagon::L2_loadrh_io:
3936 case Hexagon::L2_loadruh_io:
3937 case Hexagon::dup_L2_loadrh_io:
3938 case Hexagon::dup_L2_loadruh_io:
3939 // Rd = memh/memuh(Rs+#u3:1)
3940 DstReg = MI.getOperand(0).getReg();
3941 SrcReg = MI.getOperand(1).getReg();
3942 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3943 MI.getOperand(2).isImm() &&
3944 isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3945 return HexagonII::HSIG_L2;
3946 break;
3947 case Hexagon::L2_loadrb_io:
3948 case Hexagon::dup_L2_loadrb_io:
3949 // Rd = memb(Rs+#u3:0)
3950 DstReg = MI.getOperand(0).getReg();
3951 SrcReg = MI.getOperand(1).getReg();
3952 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3953 MI.getOperand(2).isImm() &&
3954 isUInt<3>(MI.getOperand(2).getImm()))
3955 return HexagonII::HSIG_L2;
3956 break;
3957 case Hexagon::L2_loadrd_io:
3958 case Hexagon::dup_L2_loadrd_io:
3959 // Rdd = memd(r29+#u5:3)
3960 DstReg = MI.getOperand(0).getReg();
3961 SrcReg = MI.getOperand(1).getReg();
3962 if (isDblRegForSubInst(DstReg, HRI) &&
3963 Hexagon::IntRegsRegClass.contains(SrcReg) &&
3964 HRI.getStackRegister() == SrcReg &&
3965 MI.getOperand(2).isImm() &&
3966 isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3967 return HexagonII::HSIG_L2;
3968 break;
3969 // dealloc_return is not documented in Hexagon Manual, but marked
3970 // with A_SUBINSN attribute in iset_v4classic.py.
3971 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3972 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3973 case Hexagon::L4_return:
3974 case Hexagon::L2_deallocframe:
3975 case Hexagon::dup_L2_deallocframe:
3976 return HexagonII::HSIG_L2;
3977 case Hexagon::EH_RETURN_JMPR:
3978 case Hexagon::PS_jmpret:
3979 case Hexagon::SL2_jumpr31:
3980 // jumpr r31
3981 // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0
3982 DstReg = MI.getOperand(0).getReg();
3983 if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3984 return HexagonII::HSIG_L2;
3985 break;
3986 case Hexagon::PS_jmprett:
3987 case Hexagon::PS_jmpretf:
3988 case Hexagon::PS_jmprettnewpt:
3989 case Hexagon::PS_jmpretfnewpt:
3990 case Hexagon::PS_jmprettnew:
3991 case Hexagon::PS_jmpretfnew:
3992 case Hexagon::SL2_jumpr31_t:
3993 case Hexagon::SL2_jumpr31_f:
3994 case Hexagon::SL2_jumpr31_tnew:
3995 case Hexagon::SL2_jumpr31_fnew:
3996 DstReg = MI.getOperand(1).getReg();
3997 SrcReg = MI.getOperand(0).getReg();
3998 // [if ([!]p0[.new])] jumpr r31
3999 if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
4000 (Hexagon::P0 == SrcReg)) &&
4001 (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
4002 return HexagonII::HSIG_L2;
4003 break;
4004 case Hexagon::L4_return_t:
4005 case Hexagon::L4_return_f:
4006 case Hexagon::L4_return_tnew_pnt:
4007 case Hexagon::L4_return_fnew_pnt:
4008 case Hexagon::L4_return_tnew_pt:
4009 case Hexagon::L4_return_fnew_pt:
4010 // [if ([!]p0[.new])] dealloc_return
4011 SrcReg = MI.getOperand(0).getReg();
4012 if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
4013 return HexagonII::HSIG_L2;
4014 break;
4015 //
4016 // Group S1:
4017 //
4018 // memw(Rs+#u4:2) = Rt
4019 // memb(Rs+#u4:0) = Rt
4020 case Hexagon::S2_storeri_io:
4021 case Hexagon::dup_S2_storeri_io:
4022 // Special case this one from Group S2.
4023 // memw(r29+#u5:2) = Rt
4024 Src1Reg = MI.getOperand(0).getReg();
4025 Src2Reg = MI.getOperand(2).getReg();
4026 if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4027 isIntRegForSubInst(Src2Reg) &&
4028 HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4029 isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
4030 return HexagonII::HSIG_S2;
4031 // memw(Rs+#u4:2) = Rt
4032 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4033 MI.getOperand(1).isImm() &&
4034 isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
4035 return HexagonII::HSIG_S1;
4036 break;
4037 case Hexagon::S2_storerb_io:
4038 case Hexagon::dup_S2_storerb_io:
4039 // memb(Rs+#u4:0) = Rt
4040 Src1Reg = MI.getOperand(0).getReg();
4041 Src2Reg = MI.getOperand(2).getReg();
4042 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4043 MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
4044 return HexagonII::HSIG_S1;
4045 break;
4046 //
4047 // Group S2:
4048 //
4049 // memh(Rs+#u3:1) = Rt
4050 // memw(r29+#u5:2) = Rt
4051 // memd(r29+#s6:3) = Rtt
4052 // memw(Rs+#u4:2) = #U1
4053 // memb(Rs+#u4) = #U1
4054 // allocframe(#u5:3)
4055 case Hexagon::S2_storerh_io:
4056 case Hexagon::dup_S2_storerh_io:
4057 // memh(Rs+#u3:1) = Rt
4058 Src1Reg = MI.getOperand(0).getReg();
4059 Src2Reg = MI.getOperand(2).getReg();
4060 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4061 MI.getOperand(1).isImm() &&
4062 isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
4063 return HexagonII::HSIG_S1;
4064 break;
4065 case Hexagon::S2_storerd_io:
4066 case Hexagon::dup_S2_storerd_io:
4067 // memd(r29+#s6:3) = Rtt
4068 Src1Reg = MI.getOperand(0).getReg();
4069 Src2Reg = MI.getOperand(2).getReg();
4070 if (isDblRegForSubInst(Src2Reg, HRI) &&
4071 Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4072 HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4073 isShiftedInt<6,3>(MI.getOperand(1).getImm()))
4074 return HexagonII::HSIG_S2;
4075 break;
4076 case Hexagon::S4_storeiri_io:
4077 case Hexagon::dup_S4_storeiri_io:
4078 // memw(Rs+#u4:2) = #U1
4079 Src1Reg = MI.getOperand(0).getReg();
4080 if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
4081 isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
4082 MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4083 return HexagonII::HSIG_S2;
4084 break;
4085 case Hexagon::S4_storeirb_io:
4086 case Hexagon::dup_S4_storeirb_io:
4087 // memb(Rs+#u4) = #U1
4088 Src1Reg = MI.getOperand(0).getReg();
4089 if (isIntRegForSubInst(Src1Reg) &&
4090 MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
4091 MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4092 return HexagonII::HSIG_S2;
4093 break;
4094 case Hexagon::S2_allocframe:
4095 case Hexagon::dup_S2_allocframe:
4096 if (MI.getOperand(2).isImm() &&
4097 isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4098 return HexagonII::HSIG_S1;
4099 break;
4100 //
4101 // Group A:
4102 //
4103 // Rx = add(Rx,#s7)
4104 // Rd = Rs
4105 // Rd = #u6
4106 // Rd = #-1
4107 // if ([!]P0[.new]) Rd = #0
4108 // Rd = add(r29,#u6:2)
4109 // Rx = add(Rx,Rs)
4110 // P0 = cmp.eq(Rs,#u2)
4111 // Rdd = combine(#0,Rs)
4112 // Rdd = combine(Rs,#0)
4113 // Rdd = combine(#u2,#U2)
4114 // Rd = add(Rs,#1)
4115 // Rd = add(Rs,#-1)
4116 // Rd = sxth/sxtb/zxtb/zxth(Rs)
4117 // Rd = and(Rs,#1)
4118 case Hexagon::A2_addi:
4119 case Hexagon::dup_A2_addi:
4120 DstReg = MI.getOperand(0).getReg();
4121 SrcReg = MI.getOperand(1).getReg();
4122 if (isIntRegForSubInst(DstReg)) {
4123 // Rd = add(r29,#u6:2)
4124 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
4125 HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
4126 isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
4127 return HexagonII::HSIG_A;
4128 // Rx = add(Rx,#s7)
4129 if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
4130 isInt<7>(MI.getOperand(2).getImm()))
4131 return HexagonII::HSIG_A;
4132 // Rd = add(Rs,#1)
4133 // Rd = add(Rs,#-1)
4134 if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
4135 ((MI.getOperand(2).getImm() == 1) ||
4136 (MI.getOperand(2).getImm() == -1)))
4137 return HexagonII::HSIG_A;
4138 }
4139 break;
4140 case Hexagon::A2_add:
4141 case Hexagon::dup_A2_add:
4142 // Rx = add(Rx,Rs)
4143 DstReg = MI.getOperand(0).getReg();
4144 Src1Reg = MI.getOperand(1).getReg();
4145 Src2Reg = MI.getOperand(2).getReg();
4146 if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
4147 isIntRegForSubInst(Src2Reg))
4148 return HexagonII::HSIG_A;
4149 break;
4150 case Hexagon::A2_andir:
4151 case Hexagon::dup_A2_andir:
4152 // Same as zxtb.
4153 // Rd16=and(Rs16,#255)
4154 // Rd16=and(Rs16,#1)
4155 DstReg = MI.getOperand(0).getReg();
4156 SrcReg = MI.getOperand(1).getReg();
4157 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
4158 MI.getOperand(2).isImm() &&
4159 ((MI.getOperand(2).getImm() == 1) ||
4160 (MI.getOperand(2).getImm() == 255)))
4161 return HexagonII::HSIG_A;
4162 break;
4163 case Hexagon::A2_tfr:
4164 case Hexagon::dup_A2_tfr:
4165 // Rd = Rs
4166 DstReg = MI.getOperand(0).getReg();
4167 SrcReg = MI.getOperand(1).getReg();
4168 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4169 return HexagonII::HSIG_A;
4170 break;
4171 case Hexagon::A2_tfrsi:
4172 case Hexagon::dup_A2_tfrsi:
4173 // Rd = #u6
4174 // Do not test for #u6 size since the const is getting extended
4175 // regardless and compound could be formed.
4176 // Rd = #-1
4177 DstReg = MI.getOperand(0).getReg();
4178 if (isIntRegForSubInst(DstReg))
4179 return HexagonII::HSIG_A;
4180 break;
4181 case Hexagon::C2_cmoveit:
4182 case Hexagon::C2_cmovenewit:
4183 case Hexagon::C2_cmoveif:
4184 case Hexagon::C2_cmovenewif:
4185 case Hexagon::dup_C2_cmoveit:
4186 case Hexagon::dup_C2_cmovenewit:
4187 case Hexagon::dup_C2_cmoveif:
4188 case Hexagon::dup_C2_cmovenewif:
4189 // if ([!]P0[.new]) Rd = #0
4190 // Actual form:
4191 // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
4192 DstReg = MI.getOperand(0).getReg();
4193 SrcReg = MI.getOperand(1).getReg();
4194 if (isIntRegForSubInst(DstReg) &&
4195 Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
4196 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
4197 return HexagonII::HSIG_A;
4198 break;
4199 case Hexagon::C2_cmpeqi:
4200 case Hexagon::dup_C2_cmpeqi:
4201 // P0 = cmp.eq(Rs,#u2)
4202 DstReg = MI.getOperand(0).getReg();
4203 SrcReg = MI.getOperand(1).getReg();
4204 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
4205 Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
4206 MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
4207 return HexagonII::HSIG_A;
4208 break;
4209 case Hexagon::A2_combineii:
4210 case Hexagon::A4_combineii:
4211 case Hexagon::dup_A2_combineii:
4212 case Hexagon::dup_A4_combineii:
4213 // Rdd = combine(#u2,#U2)
4214 DstReg = MI.getOperand(0).getReg();
4215 if (isDblRegForSubInst(DstReg, HRI) &&
4216 ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
4217 (MI.getOperand(1).isGlobal() &&
4218 isUInt<2>(MI.getOperand(1).getOffset()))) &&
4219 ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
4220 (MI.getOperand(2).isGlobal() &&
4221 isUInt<2>(MI.getOperand(2).getOffset()))))
4222 return HexagonII::HSIG_A;
4223 break;
4224 case Hexagon::A4_combineri:
4225 case Hexagon::dup_A4_combineri:
4226 // Rdd = combine(Rs,#0)
4227 // Rdd = combine(Rs,#0)
4228 DstReg = MI.getOperand(0).getReg();
4229 SrcReg = MI.getOperand(1).getReg();
4230 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4231 ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
4232 (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
4233 return HexagonII::HSIG_A;
4234 break;
4235 case Hexagon::A4_combineir:
4236 case Hexagon::dup_A4_combineir:
4237 // Rdd = combine(#0,Rs)
4238 DstReg = MI.getOperand(0).getReg();
4239 SrcReg = MI.getOperand(2).getReg();
4240 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4241 ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
4242 (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
4243 return HexagonII::HSIG_A;
4244 break;
4245 case Hexagon::A2_sxtb:
4246 case Hexagon::A2_sxth:
4247 case Hexagon::A2_zxtb:
4248 case Hexagon::A2_zxth:
4249 case Hexagon::dup_A2_sxtb:
4250 case Hexagon::dup_A2_sxth:
4251 case Hexagon::dup_A2_zxtb:
4252 case Hexagon::dup_A2_zxth:
4253 // Rd = sxth/sxtb/zxtb/zxth(Rs)
4254 DstReg = MI.getOperand(0).getReg();
4255 SrcReg = MI.getOperand(1).getReg();
4256 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4257 return HexagonII::HSIG_A;
4258 break;
4259 }
4260
4261 return HexagonII::HSIG_None;
4262}
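
// Illustrative sketch (not from the LLVM sources): the duplex cases above
// repeatedly test small unsigned immediates such as #u2 via isUInt<2>(...).
// fitsUnsigned below is a hypothetical stand-in that mirrors that width test,
// assuming only the standard library; it is not the LLVM helper itself.
#include <cstdint>

template <unsigned N> constexpr bool fitsUnsigned(uint64_t V) {
  // An N-bit unsigned field holds 0 .. 2^N - 1.
  return N >= 64 || V < (uint64_t(1) << N);
}
static_assert(fitsUnsigned<2>(3), "#u2 admits 0..3, e.g. Rdd = combine(#u2,#U2)");
static_assert(!fitsUnsigned<2>(4), "4 does not fit in a 2-bit field");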
4263
4264short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr &MI) const {
4265 return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
4266}
4267
4268unsigned HexagonInstrInfo::getInstrTimingClassLatency(
4269 const InstrItineraryData *ItinData, const MachineInstr &MI) const {
4270 // Default to one cycle for no itinerary. However, an "empty" itinerary may
4271 // still have a MinLatency property, which getStageLatency checks.
4272 if (!ItinData)
4273 return getInstrLatency(ItinData, MI);
4274
4275 if (MI.isTransient())
4276 return 0;
4277 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
4278}
4279
4280/// getOperandLatency - Compute and return the use operand latency of a given
4281/// pair of def and use.
4282 /// In most cases, the static scheduling itinerary is enough to determine the
4283 /// operand latency. But it may not be possible for instructions with a
4284 /// variable number of defs / uses.
4285///
4286 /// This is a raw interface to the itinerary that may be directly overridden by
4287/// a target. Use computeOperandLatency to get the best estimate of latency.
4288int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4289 const MachineInstr &DefMI,
4290 unsigned DefIdx,
4291 const MachineInstr &UseMI,
4292 unsigned UseIdx) const {
4293 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4294
4295 // Get DefIdx and UseIdx for super registers.
4296 const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4297
4298 if (DefMO.isReg() && Register::isPhysicalRegister(DefMO.getReg())) {
4299 if (DefMO.isImplicit()) {
4300 for (MCSuperRegIterator SR(DefMO.getReg(), &HRI); SR.isValid(); ++SR) {
4301 int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &HRI);
4302 if (Idx != -1) {
4303 DefIdx = Idx;
4304 break;
4305 }
4306 }
4307 }
4308
4309 const MachineOperand &UseMO = UseMI.getOperand(UseIdx);
4310 if (UseMO.isImplicit()) {
4311 for (MCSuperRegIterator SR(UseMO.getReg(), &HRI); SR.isValid(); ++SR) {
4312 int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &HRI);
4313 if (Idx != -1) {
4314 UseIdx = Idx;
4315 break;
4316 }
4317 }
4318 }
4319 }
4320
4321 int Latency = TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
4322 UseMI, UseIdx);
4323 if (!Latency)
4324 // We should never have 0 cycle latency between two instructions unless
4325 // they can be packetized together. However, this decision can't be made
4326 // here.
4327 Latency = 1;
4328 return Latency;
4329}
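
// Illustrative sketch (not from the LLVM sources): the clamping rule above in
// isolation. A reported latency of 0 is only meaningful when the two
// instructions end up in the same packet, which cannot be decided here, so it
// is rounded up to 1. clampOperandLatency is a hypothetical helper.
constexpr int clampOperandLatency(int ItineraryLatency) {
  return ItineraryLatency == 0 ? 1 : ItineraryLatency;
}
static_assert(clampOperandLatency(0) == 1, "zero latency is promoted to one cycle");
static_assert(clampOperandLatency(2) == 2, "non-zero latencies pass through unchanged");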
4330
4331 // Inverts the predication logic.
4332// p -> NotP
4333// NotP -> P
4334bool HexagonInstrInfo::getInvertedPredSense(
4335 SmallVectorImpl<MachineOperand> &Cond) const {
4336 if (Cond.empty())
4337 return false;
4338 unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
4339 Cond[0].setImm(Opc);
4340 return true;
4341}
4342
4343unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
4344 int InvPredOpcode;
4345 InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
4346 : Hexagon::getTruePredOpcode(Opc);
4347 if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
4348 return InvPredOpcode;
4349
4350 llvm_unreachable("Unexpected predicated instruction");
4352
4353// Returns the max value that doesn't need to be extended.
4354int HexagonInstrInfo::getMaxValue(const MachineInstr &MI) const {
4355 const uint64_t F = MI.getDesc().TSFlags;
4356 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4357 & HexagonII::ExtentSignedMask;
4358 unsigned bits = (F >> HexagonII::ExtentBitsPos)
4359 & HexagonII::ExtentBitsMask;
4360
4361 if (isSigned) // if value is signed
4362 return ~(-1U << (bits - 1));
4363 else
4364 return ~(-1U << bits);
4365}
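
// Illustrative sketch (not from the LLVM sources): the extent-range math of
// getMaxValue in isolation, assuming only the standard library.
// MaxValueForExtent is a hypothetical helper, not an LLVM API.
constexpr int MaxValueForExtent(unsigned Bits, bool IsSigned) {
  // All-ones below the sign bit (signed) or below the field width (unsigned).
  return IsSigned ? ~(-1U << (Bits - 1)) : ~(-1U << Bits);
}
static_assert(MaxValueForExtent(8, /*IsSigned=*/true) == 127, "s8 extent tops out at 127");
static_assert(MaxValueForExtent(6, /*IsSigned=*/false) == 63, "u6 extent tops out at 63");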
4366
4367
4368bool HexagonInstrInfo::isAddrModeWithOffset(const MachineInstr &MI) const {
4369 switch (MI.getOpcode()) {
4370 case Hexagon::L2_loadrbgp:
4371 case Hexagon::L2_loadrdgp:
4372 case Hexagon::L2_loadrhgp:
4373 case Hexagon::L2_loadrigp:
4374 case Hexagon::L2_loadrubgp:
4375 case Hexagon::L2_loadruhgp:
4376 case Hexagon::S2_storerbgp:
4377 case Hexagon::S2_storerbnewgp:
4378 case Hexagon::S2_storerhgp:
4379 case Hexagon::S2_storerhnewgp:
4380 case Hexagon::S2_storerigp:
4381 case Hexagon::S2_storerinewgp:
4382 case Hexagon::S2_storerdgp:
4383 case Hexagon::S2_storerfgp:
4384 return true;
4385 }
4386 const uint64_t F = MI.getDesc().TSFlags;
4387 unsigned addrMode =
4388 ((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
4389 // Disallow any base+offset instruction. The assembler does not yet reorder
4390 // based on any zero-offset instruction.
4391 return (addrMode == HexagonII::BaseRegOffset ||
4392 addrMode == HexagonII::BaseImmOffset ||
4393 addrMode == HexagonII::BaseLongOffset);
4394}
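
// Illustrative sketch (not from the LLVM sources): the shift-and-mask decoding
// used for TSFlags fields throughout this file. The Demo* names are made up
// for the example; the real positions and masks live in HexagonII.
#include <cstdint>

enum : unsigned { DemoAddrModePos = 3, DemoAddrModeMask = 0x7 };

constexpr unsigned decodeField(uint64_t Flags, unsigned Pos, unsigned Mask) {
  return static_cast<unsigned>((Flags >> Pos) & Mask);
}
static_assert(decodeField(0x28, DemoAddrModePos, DemoAddrModeMask) == 5,
              "bits [5:3] of 0x28 (0b101000) decode to 5");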
4395
4396bool HexagonInstrInfo::isPureSlot0(const MachineInstr &MI) const {
4397 // Workaround for the Global Scheduler. Sometimes, it creates
4398 // A4_ext as a Pseudo instruction and calls this function to see if
4399 // it can be added to an existing bundle. Since the instruction doesn't
4400 // belong to any BB yet, we can't use getUnits API.
4401 if (MI.getOpcode() == Hexagon::A4_ext)
4402 return false;
4403
4404 unsigned FuncUnits = getUnits(MI);
4405 return HexagonFUnits::isSlot0Only(FuncUnits);
4406}
4407
4408bool HexagonInstrInfo::isRestrictNoSlot1Store(const MachineInstr &MI) const {
4409 const uint64_t F = MI.getDesc().TSFlags;
4410 return ((F >> HexagonII::RestrictNoSlot1StorePos) &
4411 HexagonII::RestrictNoSlot1StoreMask);
4412}
4413
4414void HexagonInstrInfo::changeDuplexOpcode(MachineBasicBlock::instr_iterator MII,
4415 bool ToBigInstrs) const {
4416 int Opcode = -1;
4417 if (ToBigInstrs) { // To BigCore Instr.
4418 // Check if the instruction can form a Duplex.
4419 if (getDuplexCandidateGroup(*MII))
4420 // Get the opcode marked "dup_*" tag.
4421 Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4422 } else // To TinyCore Instr.
4423 Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4424
4425 // Change the opcode of the instruction.
4426 if (Opcode >= 0)
4427 MII->setDesc(get(Opcode));
4428}
4429
4430// This function is used to translate instructions to facilitate generating
4431// Duplexes on TinyCore.
4432void HexagonInstrInfo::translateInstrsForDup(MachineFunction &MF,
4433 bool ToBigInstrs) const {
4434 for (auto &MB : MF)
4435 for (MachineBasicBlock::instr_iterator Instr = MB.instr_begin(),
4436 End = MB.instr_end();
4437 Instr != End; ++Instr)
4438 changeDuplexOpcode(Instr, ToBigInstrs);
4439}
4440
4441 // This is a specialized form of the above function.
4442void HexagonInstrInfo::translateInstrsForDup(
4443 MachineBasicBlock::instr_iterator MII, bool ToBigInstrs) const {
4444 MachineBasicBlock *MBB = MII->getParent();
4445 while ((MII != MBB->instr_end()) && MII->isInsideBundle()) {
4446 changeDuplexOpcode(MII, ToBigInstrs);
4447 ++MII;
4448 }
4449}
4450
4451unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr &MI) const {
4452 using namespace HexagonII;
4453
4454 const uint64_t F = MI.getDesc().TSFlags;
4455 unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
4456 unsigned Size = getMemAccessSizeInBytes(MemAccessSize(S));
4457 if (Size != 0)
4458 return Size;
4459 // Y2_dcfetchbo is special
4460 if (MI.getOpcode() == Hexagon::Y2_dcfetchbo)
4461 return HexagonII::DoubleWordAccess;
4462
4463 // Handle vector access sizes.
4464 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4465 switch (S) {
4466 case HexagonII::HVXVectorAccess:
4467 return HRI.getSpillSize(Hexagon::HvxVRRegClass);
4468 default:
4469 llvm_unreachable("Unexpected instruction");
4470 }
4471}
4472
4473// Returns the min value that doesn't need to be extended.
4474int HexagonInstrInfo::getMinValue(const MachineInstr &MI) const {
4475 const uint64_t F = MI.getDesc().TSFlags;
4476 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4477 & HexagonII::ExtentSignedMask;
4478 unsigned bits = (F >> HexagonII::ExtentBitsPos)
4479 & HexagonII::ExtentBitsMask;
4480
4481 if (isSigned) // if value is signed
4482 return -1U << (bits - 1);
4483 else
4484 return 0;
4485}
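
// Illustrative sketch (not from the LLVM sources): the matching lower bound of
// the extent range, written with an explicit negation instead of the unsigned
// shift used above; the values agree for the small widths involved here.
// MinValueForExtent is a hypothetical helper, not an LLVM API.
constexpr int MinValueForExtent(unsigned Bits, bool IsSigned) {
  return IsSigned ? -(1 << (Bits - 1)) : 0;
}
static_assert(MinValueForExtent(8, /*IsSigned=*/true) == -128, "s8 extent starts at -128");
static_assert(MinValueForExtent(6, /*IsSigned=*/false) == 0, "u6 extent starts at 0");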
4486
4487// Returns opcode of the non-extended equivalent instruction.
4488short HexagonInstrInfo::getNonExtOpcode(const MachineInstr &MI) const {
4489 // Check if the instruction has a register form that uses register in place
4490 // of the extended operand, if so return that as the non-extended form.
4491 short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
4492 if (NonExtOpcode >= 0)
4493 return NonExtOpcode;
4494
4495 if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
4496 // Check addressing mode and retrieve non-ext equivalent instruction.
4497 switch (getAddrMode(MI)) {
4498 case HexagonII::Absolute:
4499 return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
4500 case HexagonII::BaseImmOffset:
4501 return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
4502 case HexagonII::BaseLongOffset:
4503 return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
4504
4505 default:
4506 return -1;
4507 }
4508 }
4509 return -1;
4510}
4511
4512bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
4513 unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
4514 if (Cond.empty())
4515 return false;
4516 assert(Cond.size() == 2);
4517 if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
4518 LLVM_DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
4519 return false;
4520 }
4521 PredReg = Cond[1].getReg();
4522 PredRegPos = 1;
4523 // See IfConversion.cpp for why we add RegState::Implicit | RegState::Undef.
4524 PredRegFlags = 0;
4525 if (Cond[1].isImplicit())
4526 PredRegFlags = RegState::Implicit;
4527 if (Cond[1].isUndef())
4528 PredRegFlags |= RegState::Undef;
4529 return true;
4530}
4531
4532short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr &MI) const {
4533 return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
4534}
4535
4536short HexagonInstrInfo::getRegForm(const MachineInstr &MI) const {
4537 return Hexagon::getRegForm(MI.getOpcode());
4538}
4539
4540// Return the number of bytes required to encode the instruction.
4541// Hexagon instructions are fixed length, 4 bytes, unless they
4542// use a constant extender, which requires another 4 bytes.
4543// For debug instructions and prolog labels, return 0.
4544unsigned HexagonInstrInfo::getSize(const MachineInstr &MI) const {
4545 if (MI.isDebugInstr() || MI.isPosition())
4546 return 0;
4547
4548 unsigned Size = MI.getDesc().getSize();
4549 if (!Size)
4550 // Assume the default insn size in case it cannot be determined
4551 // for whatever reason.
4552 Size = HEXAGON_INSTR_SIZE;
4553
4554 if (isConstExtended(MI) || isExtended(MI))
4555 Size += HEXAGON_INSTR_SIZE;
4556
4557 // Try to compute the number of instructions in the asm string.
4558 if (BranchRelaxAsmLarge && MI.getOpcode() == Hexagon::INLINEASM) {
4559 const MachineBasicBlock &MBB = *MI.getParent();
4560 const MachineFunction *MF = MBB.getParent();
4561 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4562
4563 // Count the number of register definitions to find the asm string.
4564 unsigned NumDefs = 0;
4565 for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
4566 ++NumDefs)
4567 assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");
4568
4569 assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
4570 // Disassemble the AsmStr and approximate number of instructions.
4571 const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
4572 Size = getInlineAsmLength(AsmStr, *MAI);
4573 }
4574
4575 return Size;
4576}
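
// Illustrative sketch (not from the LLVM sources): the fixed-length size rule
// of getSize, ignoring the inline-asm estimate. Hexagon instructions are 4
// bytes, plus 4 more when a constant extender is attached; debug instructions
// and labels contribute 0. InstrBytes is a hypothetical helper.
constexpr unsigned kHexagonInstrBytes = 4; // matches HEXAGON_INSTR_SIZE

constexpr unsigned InstrBytes(bool IsDebugOrLabel, bool IsConstExtended) {
  if (IsDebugOrLabel)
    return 0;
  return kHexagonInstrBytes + (IsConstExtended ? kHexagonInstrBytes : 0);
}
static_assert(InstrBytes(false, false) == 4, "plain instruction");
static_assert(InstrBytes(false, true) == 8, "constant-extended instruction");
static_assert(InstrBytes(true, false) == 0, "debug value or label");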
4577
4578uint64_t HexagonInstrInfo::getType(const MachineInstr &MI) const {
4579 const uint64_t F = MI.getDesc().TSFlags;
4580 return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4581}
4582
4583InstrStage::FuncUnits HexagonInstrInfo::getUnits(const MachineInstr &MI) const {
4584 const InstrItineraryData &II = *Subtarget.getInstrItineraryData();
4585 const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
4586
4587 return IS.getUnits();
4588}
4589
4590// Calculate size of the basic block without debug instructions.
4591unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
4592 return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4593}
4594
4595unsigned HexagonInstrInfo::nonDbgBundleSize(
4596 MachineBasicBlock::const_iterator BundleHead) const {
4597 assert(BundleHead->isBundle() && "Not a bundle header");
4598 auto MII = BundleHead.getInstrIterator();
4599 // Skip the bundle header.
4600 return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
4601}
4602
4603/// immediateExtend - Changes the instruction in place to one using an immediate
4604/// extender.
4605void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
4606 assert((isExtendable(MI)||isConstExtended(MI)) &&
4607 "Instruction must be extendable");
4608 // Find which operand is extendable.
4609 short ExtOpNum = getCExtOpNum(MI);
4610 MachineOperand &MO = MI.getOperand(ExtOpNum);
4611 // This needs to be something we understand.
4612 assert((MO.isMBB() || MO.isImm()) &&
4613 "Branch with unknown extendable field type");
4614 // Mark given operand as extended.
4615 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
4616}
4617
4618bool HexagonInstrInfo::invertAndChangeJumpTarget(
4619 MachineInstr &MI, MachineBasicBlock *NewTarget) const {
4620 LLVM_DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
4621 << printMBBReference(*NewTarget);
4622 MI.dump(););
4623 assert(MI.isBranch());
4624 unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
4625 int TargetPos = MI.getNumOperands() - 1;
4626 // In general branch target is the last operand,
4627 // but some implicit defs added at the end might change it.
4628 while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
4629 --TargetPos;
4630 assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
4631 MI.getOperand(TargetPos).setMBB(NewTarget);
4632 if (EnableBranchPrediction && isPredicatedNew(MI)) {
4633 NewOpcode = reversePrediction(NewOpcode);
4634 }
4635 MI.setDesc(get(NewOpcode));
4636 return true;
4637}
4638
4639void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
4640 /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
4641 MachineFunction::iterator A = MF.begin();
4642 MachineBasicBlock &B = *A;
4643 MachineBasicBlock::iterator I = B.begin();
4644 DebugLoc DL = I->getDebugLoc();
4645 MachineInstr *NewMI;
4646
4647 for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4648 insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4649 NewMI = BuildMI(B, I, DL, get(insn));
4650 LLVM_DEBUG(dbgs() << "\n"
4651 << getName(NewMI->getOpcode())
4652 << " Class: " << NewMI->getDesc().getSchedClass());
4653 NewMI->eraseFromParent();
4654 }
4655 /* --- The code above is used to generate complete set of Hexagon Insn --- */
4656}
4657
4658 // Inverts the predication logic.
4659// p -> NotP
4660// NotP -> P
4661bool HexagonInstrInfo::reversePredSense(MachineInstr &MI) const {
4662 LLVM_DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
4663 MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
4664 return true;
4665}
4666
4667// Reverse the branch prediction.
4668unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4669 int PredRevOpcode = -1;
4670 if (isPredictedTaken(Opcode))
4671 PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4672 else
4673 PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4674 assert(PredRevOpcode > 0);
4675 return PredRevOpcode;
4676}
4677
4678// TODO: Add more rigorous validation.
4679bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
4680 const {
4681 return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4682}
4683
4684void HexagonInstrInfo::
4685setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const {
4686 assert(MIB->isBundle());
4687 MachineOperand &Operand = MIB->getOperand(0);
4688 if (Operand.isImm())
4689 Operand.setImm(Operand.getImm() | memShufDisabledMask);
4690 else
4691 MIB->addOperand(MachineOperand::CreateImm(memShufDisabledMask));
4692}
4693
4694bool HexagonInstrInfo::getBundleNoShuf(const MachineInstr &MIB) const {
4695 assert(MIB.isBundle());
4696 const MachineOperand &Operand = MIB.getOperand(0);
4697 return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);
4698}
4699
4700// Addressing mode relations.
4701short HexagonInstrInfo::changeAddrMode_abs_io(short Opc) const {
4702 return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
4703}
4704
4705short HexagonInstrInfo::changeAddrMode_io_abs(short Opc) const {
4706 return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
4707}
4708
4709short HexagonInstrInfo::changeAddrMode_io_pi(short Opc) const {
4710 return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;
4711}
4712
4713short HexagonInstrInfo::changeAddrMode_io_rr(short Opc) const {
4714 return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
4715}
4716
4717short HexagonInstrInfo::changeAddrMode_pi_io(short Opc) const {
4718 return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;
4719}
4720
4721short HexagonInstrInfo::changeAddrMode_rr_io(short Opc) const {
4722 return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
4723}
4724
4725short HexagonInstrInfo::changeAddrMode_rr_ur(short Opc) const {
4726 return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
4727}
4728
4729short HexagonInstrInfo::changeAddrMode_ur_rr(short Opc) const {
4730 return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
4731}
4732
4733MCInst HexagonInstrInfo::getNop() const {
4734 static const MCInst Nop = MCInstBuilder(Hexagon::A2_nop);
4735
4736 return MCInstBuilder(Hexagon::BUNDLE)
4737 .addImm(0)
4738 .addInst(&Nop);
4739}