Bug Summary

File: build/source/llvm/lib/CodeGen/TargetInstrInfo.cpp
Warning: line 1045, column 7
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name TargetInstrInfo.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-17/lib/clang/17 -I lib/CodeGen -I /build/source/llvm/lib/CodeGen -I include -I /build/source/llvm/include -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -source-date-epoch 1677842174 -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-03-03-140516-16496-1 -x c++ /build/source/llvm/lib/CodeGen/TargetInstrInfo.cpp
1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/TargetInstrInfo.h"
14#include "llvm/ADT/StringExtras.h"
15#include "llvm/BinaryFormat/Dwarf.h"
16#include "llvm/CodeGen/MachineCombinerPattern.h"
17#include "llvm/CodeGen/MachineFrameInfo.h"
18#include "llvm/CodeGen/MachineInstrBuilder.h"
19#include "llvm/CodeGen/MachineMemOperand.h"
20#include "llvm/CodeGen/MachineRegisterInfo.h"
21#include "llvm/CodeGen/MachineScheduler.h"
22#include "llvm/CodeGen/MachineTraceMetrics.h"
23#include "llvm/CodeGen/PseudoSourceValue.h"
24#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
25#include "llvm/CodeGen/StackMaps.h"
26#include "llvm/CodeGen/TargetFrameLowering.h"
27#include "llvm/CodeGen/TargetLowering.h"
28#include "llvm/CodeGen/TargetRegisterInfo.h"
29#include "llvm/CodeGen/TargetSchedule.h"
30#include "llvm/IR/DataLayout.h"
31#include "llvm/IR/DebugInfoMetadata.h"
32#include "llvm/MC/MCAsmInfo.h"
33#include "llvm/MC/MCInstrItineraries.h"
34#include "llvm/Support/CommandLine.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/raw_ostream.h"
37
38using namespace llvm;
39
40static cl::opt<bool> DisableHazardRecognizer(
41 "disable-sched-hazard", cl::Hidden, cl::init(false),
42 cl::desc("Disable hazard detection during preRA scheduling"));
43
44TargetInstrInfo::~TargetInstrInfo() = default;
45
46const TargetRegisterClass*
47TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
48 const TargetRegisterInfo *TRI,
49 const MachineFunction &MF) const {
50 if (OpNum >= MCID.getNumOperands())
51 return nullptr;
52
53 short RegClass = MCID.operands()[OpNum].RegClass;
54 if (MCID.operands()[OpNum].isLookupPtrRegClass())
55 return TRI->getPointerRegClass(MF, RegClass);
56
57 // Instructions like INSERT_SUBREG do not have fixed register classes.
58 if (RegClass < 0)
59 return nullptr;
60
61 // Otherwise just look it up normally.
62 return TRI->getRegClass(RegClass);
63}
64
65/// insertNoop - Insert a noop into the instruction stream at the specified
66/// point.
67void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
68 MachineBasicBlock::iterator MI) const {
69 llvm_unreachable("Target didn't implement insertNoop!")::llvm::llvm_unreachable_internal("Target didn't implement insertNoop!"
, "llvm/lib/CodeGen/TargetInstrInfo.cpp", 69)
;
70}
71
72/// insertNoops - Insert noops into the instruction stream at the specified
73/// point.
74void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
75 MachineBasicBlock::iterator MI,
76 unsigned Quantity) const {
77 for (unsigned i = 0; i < Quantity; ++i)
78 insertNoop(MBB, MI);
79}
80
81static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
82 return strncmp(Str, MAI.getCommentString().data(),
83 MAI.getCommentString().size()) == 0;
84}
85
86/// Measure the specified inline asm to determine an approximation of its
87/// length.
88/// Comments (which run till the next SeparatorString or newline) do not
89/// count as an instruction.
90/// Any other non-whitespace text is considered an instruction, with
91/// multiple instructions separated by SeparatorString or newlines.
92/// Variable-length instructions are not handled here; this function
93/// may be overloaded in the target code to do that.
94/// We implement a special case of the .space directive which takes only a
95/// single integer argument in base 10 that is the size in bytes. This is a
96/// restricted form of the GAS directive in that we only interpret
97/// simple--i.e. not a logical or arithmetic expression--size values without
98/// the optional fill value. This is primarily used for creating arbitrary
99/// sized inline asm blocks for testing purposes.
100unsigned TargetInstrInfo::getInlineAsmLength(
101 const char *Str,
102 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
103 // Count the number of instructions in the asm.
104 bool AtInsnStart = true;
105 unsigned Length = 0;
106 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
107 for (; *Str; ++Str) {
108 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
109 strlen(MAI.getSeparatorString())) == 0) {
110 AtInsnStart = true;
111 } else if (isAsmComment(Str, MAI)) {
112 // Stop counting as an instruction after a comment until the next
113 // separator.
114 AtInsnStart = false;
115 }
116
117 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
118 unsigned AddLength = MaxInstLength;
119 if (strncmp(Str, ".space", 6) == 0) {
120 char *EStr;
121 int SpaceSize;
122 SpaceSize = strtol(Str + 6, &EStr, 10);
123 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
124 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
125 ++EStr;
126 if (*EStr == '\0' || *EStr == '\n' ||
127 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
128 AddLength = SpaceSize;
129 }
130 Length += AddLength;
131 AtInsnStart = false;
132 }
133 }
134
135 return Length;
136}
137
138/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
139/// after it, replacing it with an unconditional branch to NewDest.
140void
141TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
142 MachineBasicBlock *NewDest) const {
143 MachineBasicBlock *MBB = Tail->getParent();
144
145 // Remove all the old successors of MBB from the CFG.
146 while (!MBB->succ_empty())
147 MBB->removeSuccessor(MBB->succ_begin());
148
149 // Save off the debug loc before erasing the instruction.
150 DebugLoc DL = Tail->getDebugLoc();
151
152 // Update call site info and remove all the dead instructions
153 // from the end of MBB.
154 while (Tail != MBB->end()) {
155 auto MI = Tail++;
156 if (MI->shouldUpdateCallSiteInfo())
157 MBB->getParent()->eraseCallSiteInfo(&*MI);
158 MBB->erase(MI);
159 }
160
161 // If NewDest isn't immediately after MBB, insert a branch to it.
162 if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
163 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
164 MBB->addSuccessor(NewDest);
165}
166
167MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
168 bool NewMI, unsigned Idx1,
169 unsigned Idx2) const {
170 const MCInstrDesc &MCID = MI.getDesc();
171 bool HasDef = MCID.getNumDefs();
172 if (HasDef && !MI.getOperand(0).isReg())
173 // No idea how to commute this instruction. Target should implement its own.
174 return nullptr;
175
176 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
177 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
178 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
179 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
180 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
181 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
182 "This only knows how to commute register operands so far");
183
184 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
185 Register Reg1 = MI.getOperand(Idx1).getReg();
186 Register Reg2 = MI.getOperand(Idx2).getReg();
187 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
188 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
189 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
190 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
191 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
192 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
193 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
194 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
195 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
196 // Avoid calling isRenamable for virtual registers since we assert that
197 // renamable property is only queried/set for physical registers.
198 bool Reg1IsRenamable =
199 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
200 bool Reg2IsRenamable =
201 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
202 // If destination is tied to either of the commuted source register, then
203 // it must be updated.
204 if (HasDef && Reg0 == Reg1 &&
205 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
206 Reg2IsKill = false;
207 Reg0 = Reg2;
208 SubReg0 = SubReg2;
209 } else if (HasDef && Reg0 == Reg2 &&
210 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
211 Reg1IsKill = false;
212 Reg0 = Reg1;
213 SubReg0 = SubReg1;
214 }
215
216 MachineInstr *CommutedMI = nullptr;
217 if (NewMI) {
218 // Create a new instruction.
219 MachineFunction &MF = *MI.getMF();
220 CommutedMI = MF.CloneMachineInstr(&MI);
221 } else {
222 CommutedMI = &MI;
223 }
224
225 if (HasDef) {
226 CommutedMI->getOperand(0).setReg(Reg0);
227 CommutedMI->getOperand(0).setSubReg(SubReg0);
228 }
229 CommutedMI->getOperand(Idx2).setReg(Reg1);
230 CommutedMI->getOperand(Idx1).setReg(Reg2);
231 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
232 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
233 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
234 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
235 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
236 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
237 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
238 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
239 // Avoid calling setIsRenamable for virtual registers since we assert that
240 // renamable property is only queried/set for physical registers.
241 if (Reg1.isPhysical())
242 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
243 if (Reg2.isPhysical())
244 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
245 return CommutedMI;
246}
247
248MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
249 unsigned OpIdx1,
250 unsigned OpIdx2) const {
251 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
252 // any commutable operand, which is done in findCommutedOpIndices() method
253 // called below.
254 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
255 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
256 assert(MI.isCommutable() &&
257 "Precondition violation: MI must be commutable.");
258 return nullptr;
259 }
260 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
261}
262
263bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
264 unsigned &ResultIdx2,
265 unsigned CommutableOpIdx1,
266 unsigned CommutableOpIdx2) {
267 if (ResultIdx1 == CommuteAnyOperandIndex &&
268 ResultIdx2 == CommuteAnyOperandIndex) {
269 ResultIdx1 = CommutableOpIdx1;
270 ResultIdx2 = CommutableOpIdx2;
271 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
272 if (ResultIdx2 == CommutableOpIdx1)
273 ResultIdx1 = CommutableOpIdx2;
274 else if (ResultIdx2 == CommutableOpIdx2)
275 ResultIdx1 = CommutableOpIdx1;
276 else
277 return false;
278 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
279 if (ResultIdx1 == CommutableOpIdx1)
280 ResultIdx2 = CommutableOpIdx2;
281 else if (ResultIdx1 == CommutableOpIdx2)
282 ResultIdx2 = CommutableOpIdx1;
283 else
284 return false;
285 } else
286 // Check that the result operand indices match the given commutable
287 // operand indices.
288 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
289 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
290
291 return true;
292}
293
294bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
295 unsigned &SrcOpIdx1,
296 unsigned &SrcOpIdx2) const {
297 assert(!MI.isBundle() &&
298 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
299
300 const MCInstrDesc &MCID = MI.getDesc();
301 if (!MCID.isCommutable())
302 return false;
303
304 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
305 // is not true, then the target must implement this.
306 unsigned CommutableOpIdx1 = MCID.getNumDefs();
307 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
308 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
309 CommutableOpIdx1, CommutableOpIdx2))
310 return false;
311
312 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
313 // No idea.
314 return false;
315 return true;
316}
317
318bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
319 if (!MI.isTerminator()) return false;
320
321 // Conditional branch is a special case.
322 if (MI.isBranch() && !MI.isBarrier())
323 return true;
324 if (!MI.isPredicable())
325 return true;
326 return !isPredicated(MI);
327}
328
329bool TargetInstrInfo::PredicateInstruction(
330 MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
331 bool MadeChange = false;
332
333 assert(!MI.isBundle() &&
334 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
335
336 const MCInstrDesc &MCID = MI.getDesc();
337 if (!MI.isPredicable())
338 return false;
339
340 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
341 if (MCID.operands()[i].isPredicate()) {
342 MachineOperand &MO = MI.getOperand(i);
343 if (MO.isReg()) {
344 MO.setReg(Pred[j].getReg());
345 MadeChange = true;
346 } else if (MO.isImm()) {
347 MO.setImm(Pred[j].getImm());
348 MadeChange = true;
349 } else if (MO.isMBB()) {
350 MO.setMBB(Pred[j].getMBB());
351 MadeChange = true;
352 }
353 ++j;
354 }
355 }
356 return MadeChange;
357}
358
359bool TargetInstrInfo::hasLoadFromStackSlot(
360 const MachineInstr &MI,
361 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
362 size_t StartSize = Accesses.size();
363 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
364 oe = MI.memoperands_end();
365 o != oe; ++o) {
366 if ((*o)->isLoad() &&
367 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
368 Accesses.push_back(*o);
369 }
370 return Accesses.size() != StartSize;
371}
372
373bool TargetInstrInfo::hasStoreToStackSlot(
374 const MachineInstr &MI,
375 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
376 size_t StartSize = Accesses.size();
377 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
378 oe = MI.memoperands_end();
379 o != oe; ++o) {
380 if ((*o)->isStore() &&
381 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
382 Accesses.push_back(*o);
383 }
384 return Accesses.size() != StartSize;
385}
386
387bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
388 unsigned SubIdx, unsigned &Size,
389 unsigned &Offset,
390 const MachineFunction &MF) const {
391 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
392 if (!SubIdx) {
393 Size = TRI->getSpillSize(*RC);
394 Offset = 0;
395 return true;
396 }
397 unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
398 // Convert bit size to byte size.
399 if (BitSize % 8)
400 return false;
401
402 int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
403 if (BitOffset < 0 || BitOffset % 8)
404 return false;
405
406 Size = BitSize / 8;
407 Offset = (unsigned)BitOffset / 8;
408
409 assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
410
411 if (!MF.getDataLayout().isLittleEndian()) {
412 Offset = TRI->getSpillSize(*RC) - (Offset + Size);
413 }
414 return true;
415}
416
417void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
418 MachineBasicBlock::iterator I,
419 Register DestReg, unsigned SubIdx,
420 const MachineInstr &Orig,
421 const TargetRegisterInfo &TRI) const {
422 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
423 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
424 MBB.insert(I, MI);
425}
426
427bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
428 const MachineInstr &MI1,
429 const MachineRegisterInfo *MRI) const {
430 return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
431}
432
433MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
434 MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
435 assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
436 MachineFunction &MF = *MBB.getParent();
437 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
438}
439
440// If the COPY instruction in MI can be folded to a stack operation, return
441// the register class to use.
442static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
443 unsigned FoldIdx) {
444 assert(MI.isCopy() && "MI must be a COPY instruction");
445 if (MI.getNumOperands() != 2)
446 return nullptr;
447 assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
448
449 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
450 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
451
452 if (FoldOp.getSubReg() || LiveOp.getSubReg())
453 return nullptr;
454
455 Register FoldReg = FoldOp.getReg();
456 Register LiveReg = LiveOp.getReg();
457
458 assert(FoldReg.isVirtual() && "Cannot fold physregs");
459
460 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
461 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
462
463 if (LiveOp.getReg().isPhysical())
464 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
465
466 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
467 return RC;
468
469 // FIXME: Allow folding when register classes are memory compatible.
470 return nullptr;
471}
472
473 MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
474
475std::pair<unsigned, unsigned>
476TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
477 switch (MI.getOpcode()) {
478 case TargetOpcode::STACKMAP:
479 // StackMapLiveValues are foldable
480 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
481 case TargetOpcode::PATCHPOINT:
482 // For PatchPoint, the call args are not foldable (even if reported in the
483 // stackmap e.g. via anyregcc).
484 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
485 case TargetOpcode::STATEPOINT:
486 // For statepoints, fold deopt and gc arguments, but not call arguments.
487 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
488 default:
489 llvm_unreachable("unexpected stackmap opcode")::llvm::llvm_unreachable_internal("unexpected stackmap opcode"
, "llvm/lib/CodeGen/TargetInstrInfo.cpp", 489)
;
490 }
491}
492
493static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
494 ArrayRef<unsigned> Ops, int FrameIndex,
495 const TargetInstrInfo &TII) {
496 unsigned StartIdx = 0;
497 unsigned NumDefs = 0;
498 // getPatchpointUnfoldableRange throws guarantee if MI is not a patchpoint.
499 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
500
501 unsigned DefToFoldIdx = MI.getNumOperands();
502
503 // Return false if any operands requested for folding are not foldable (not
504 // part of the stackmap's live values).
505 for (unsigned Op : Ops) {
506 if (Op < NumDefs) {
507 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
508 DefToFoldIdx = Op;
509 } else if (Op < StartIdx) {
510 return nullptr;
511 }
512 if (MI.getOperand(Op).isTied())
513 return nullptr;
514 }
515
516 MachineInstr *NewMI =
517 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
518 MachineInstrBuilder MIB(MF, NewMI);
519
520 // No need to fold return, the meta data, and function arguments
521 for (unsigned i = 0; i < StartIdx; ++i)
522 if (i != DefToFoldIdx)
523 MIB.add(MI.getOperand(i));
524
525 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
526 MachineOperand &MO = MI.getOperand(i);
527 unsigned TiedTo = e;
528 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
529
530 if (is_contained(Ops, i)) {
531 assert(TiedTo == e && "Cannot fold tied operands");
532 unsigned SpillSize;
533 unsigned SpillOffset;
534 // Compute the spill slot size and offset.
535 const TargetRegisterClass *RC =
536 MF.getRegInfo().getRegClass(MO.getReg());
537 bool Valid =
538 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
539 if (!Valid)
540 report_fatal_error("cannot spill patchpoint subregister operand");
541 MIB.addImm(StackMaps::IndirectMemRefOp);
542 MIB.addImm(SpillSize);
543 MIB.addFrameIndex(FrameIndex);
544 MIB.addImm(SpillOffset);
545 } else {
546 MIB.add(MO);
547 if (TiedTo < e) {
548 assert(TiedTo < NumDefs && "Bad tied operand");
549 if (TiedTo > DefToFoldIdx)
550 --TiedTo;
551 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
552 }
553 }
554 }
555 return NewMI;
556}
557
558MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
559 ArrayRef<unsigned> Ops, int FI,
560 LiveIntervals *LIS,
561 VirtRegMap *VRM) const {
562 auto Flags = MachineMemOperand::MONone;
563 for (unsigned OpIdx : Ops)
564 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
565 : MachineMemOperand::MOLoad;
566
567 MachineBasicBlock *MBB = MI.getParent();
568 assert(MBB && "foldMemoryOperand needs an inserted instruction");
569 MachineFunction &MF = *MBB->getParent();
570
571 // If we're not folding a load into a subreg, the size of the load is the
572 // size of the spill slot. But if we are, we need to figure out what the
573 // actual load size is.
574 int64_t MemSize = 0;
575 const MachineFrameInfo &MFI = MF.getFrameInfo();
576 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
577
578 if (Flags & MachineMemOperand::MOStore) {
579 MemSize = MFI.getObjectSize(FI);
580 } else {
581 for (unsigned OpIdx : Ops) {
582 int64_t OpSize = MFI.getObjectSize(FI);
583
584 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
585 unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
586 if (SubRegSize > 0 && !(SubRegSize % 8))
587 OpSize = SubRegSize / 8;
588 }
589
590 MemSize = std::max(MemSize, OpSize);
591 }
592 }
593
594 assert(MemSize && "Did not expect a zero-sized stack slot");
595
596 MachineInstr *NewMI = nullptr;
597
598 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
599 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
600 MI.getOpcode() == TargetOpcode::STATEPOINT) {
601 // Fold stackmap/patchpoint.
602 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
603 if (NewMI)
604 MBB->insert(MI, NewMI);
605 } else {
606 // Ask the target to do the actual folding.
607 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
608 }
609
610 if (NewMI) {
611 NewMI->setMemRefs(MF, MI.memoperands());
612 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
613 assert((!(Flags & MachineMemOperand::MOStore) ||
614 NewMI->mayStore()) &&
615 "Folded a def to a non-store!");
616 assert((!(Flags & MachineMemOperand::MOLoad) ||
617 NewMI->mayLoad()) &&
618 "Folded a use to a non-load!");
619 assert(MFI.getObjectOffset(FI) != -1);
620 MachineMemOperand *MMO =
621 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
622 Flags, MemSize, MFI.getObjectAlign(FI));
623 NewMI->addMemOperand(MF, MMO);
624
625 // The pass "x86 speculative load hardening" always attaches symbols to
626 // call instructions. We need to copy it from the old instruction.
627 NewMI->cloneInstrSymbols(MF, MI);
628
629 return NewMI;
630 }
631
632 // Straight COPY may fold as load/store.
633 if (!MI.isCopy() || Ops.size() != 1)
634 return nullptr;
635
636 const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
637 if (!RC)
638 return nullptr;
639
640 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
641 MachineBasicBlock::iterator Pos = MI;
642
643 if (Flags == MachineMemOperand::MOStore)
644 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
645 Register());
646 else
647 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
648 return &*--Pos;
649}
650
651MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
652 ArrayRef<unsigned> Ops,
653 MachineInstr &LoadMI,
654 LiveIntervals *LIS) const {
655 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
656#ifndef NDEBUG
657 for (unsigned OpIdx : Ops)
658 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
659#endif
660
661 MachineBasicBlock &MBB = *MI.getParent();
662 MachineFunction &MF = *MBB.getParent();
663
664 // Ask the target to do the actual folding.
665 MachineInstr *NewMI = nullptr;
666 int FrameIndex = 0;
667
668 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
669 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
670 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
671 isLoadFromStackSlot(LoadMI, FrameIndex)) {
672 // Fold stackmap/patchpoint.
673 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
674 if (NewMI)
675 NewMI = &*MBB.insert(MI, NewMI);
676 } else {
677 // Ask the target to do the actual folding.
678 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
679 }
680
681 if (!NewMI)
682 return nullptr;
683
684 // Copy the memoperands from the load to the folded instruction.
685 if (MI.memoperands_empty()) {
686 NewMI->setMemRefs(MF, LoadMI.memoperands());
687 } else {
688 // Handle the rare case of folding multiple loads.
689 NewMI->setMemRefs(MF, MI.memoperands());
690 for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
691 E = LoadMI.memoperands_end();
692 I != E; ++I) {
693 NewMI->addMemOperand(MF, *I);
694 }
695 }
696 return NewMI;
697}
698
699bool TargetInstrInfo::hasReassociableOperands(
700 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
701 const MachineOperand &Op1 = Inst.getOperand(1);
702 const MachineOperand &Op2 = Inst.getOperand(2);
703 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
704
705 // We need virtual register definitions for the operands that we will
706 // reassociate.
707 MachineInstr *MI1 = nullptr;
708 MachineInstr *MI2 = nullptr;
709 if (Op1.isReg() && Op1.getReg().isVirtual())
710 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
711 if (Op2.isReg() && Op2.getReg().isVirtual())
712 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
713
714 // And at least one operand must be defined in MBB.
715 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
716}
717
718bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
719 unsigned Opcode2) const {
720 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
721}
722
723bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
724 bool &Commuted) const {
725 const MachineBasicBlock *MBB = Inst.getParent();
726 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
727 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
728 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
729 unsigned Opcode = Inst.getOpcode();
730
731 // If only one operand has the same or inverse opcode and it's the second
732 // source operand, the operands must be commuted.
733 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
734 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
735 if (Commuted)
736 std::swap(MI1, MI2);
737
738 // 1. The previous instruction must be the same type as Inst.
739 // 2. The previous instruction must also be associative/commutative or be the
740 // inverse of such an operation (this can be different even for
741 // instructions with the same opcode if traits like fast-math-flags are
742 // included).
743 // 3. The previous instruction must have virtual register definitions for its
744 // operands in the same basic block as Inst.
745 // 4. The previous instruction's result must only be used by Inst.
746 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
747 (isAssociativeAndCommutative(*MI1) ||
748 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
749 hasReassociableOperands(*MI1, MBB) &&
750 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
751}
752
753// 1. The operation must be associative and commutative or be the inverse of
754// such an operation.
755// 2. The instruction must have virtual register definitions for its
756// operands in the same basic block.
757// 3. The instruction must have a reassociable sibling.
758bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
759 bool &Commuted) const {
760 return (isAssociativeAndCommutative(Inst) ||
761 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
762 hasReassociableOperands(Inst, Inst.getParent()) &&
763 hasReassociableSibling(Inst, Commuted);
764}
765
766// The concept of the reassociation pass is that these operations can benefit
767// from this kind of transformation:
768//
769// A = ? op ?
770// B = A op X (Prev)
771// C = B op Y (Root)
772// -->
773// A = ? op ?
774// B = X op Y
775// C = A op B
776//
777// breaking the dependency between A and B, allowing them to be executed in
778// parallel (or back-to-back in a pipeline) instead of depending on each other.
779
780// FIXME: This has the potential to be expensive (compile time) while not
781// improving the code at all. Some ways to limit the overhead:
782// 1. Track successful transforms; bail out if hit rate gets too low.
783// 2. Only enable at -O3 or some other non-default optimization level.
784// 3. Pre-screen pattern candidates here: if an operand of the previous
785// instruction is known to not increase the critical path, then don't match
786// that pattern.
787bool TargetInstrInfo::getMachineCombinerPatterns(
788 MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
789 bool DoRegPressureReduce) const {
790 bool Commute;
791 if (isReassociationCandidate(Root, Commute)) {
792 // We found a sequence of instructions that may be suitable for a
793 // reassociation of operands to increase ILP. Specify each commutation
794 // possibility for the Prev instruction in the sequence and let the
795 // machine combiner decide if changing the operands is worthwhile.
796 if (Commute) {
797 Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
798 Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
799 } else {
800 Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
801 Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
802 }
803 return true;
804 }
805
806 return false;
807}
808
809/// Return true when a code sequence can improve loop throughput.
810bool
811TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
812 return false;
813}
814
815std::pair<unsigned, unsigned>
816TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
817 const MachineInstr &Root,
818 const MachineInstr &Prev) const {
819 bool AssocCommutRoot = isAssociativeAndCommutative(Root);
820 bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
821
822 // Early exit if both opcodes are associative and commutative. It's a trivial
823 // reassociation when we only change operands order. In this case opcodes are
824 // not required to have inverse versions.
825 if (AssocCommutRoot && AssocCommutPrev) {
826 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
827 return std::make_pair(Root.getOpcode(), Root.getOpcode());
828 }
829
830 // At least one instruction is not associative or commutative.
831 // Since we have matched one of the reassociation patterns, we expect that the
832 // instructions' opcodes are equal or one of them is the inversion of the
833 // other.
834 assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
835 "Incorrectly matched pattern");
836 unsigned AssocCommutOpcode = Root.getOpcode();
837 unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
838 if (!AssocCommutRoot)
839 std::swap(AssocCommutOpcode, InverseOpcode);
840
841 // The transformation rule (`+` is any associative and commutative binary
842 // operation, `-` is the inverse):
843 // REASSOC_AX_BY:
844 // (A + X) + Y => A + (X + Y)
845 // (A + X) - Y => A + (X - Y)
846 // (A - X) + Y => A - (X - Y)
847 // (A - X) - Y => A - (X + Y)
848 // REASSOC_XA_BY:
849 // (X + A) + Y => (X + Y) + A
850 // (X + A) - Y => (X - Y) + A
851 // (X - A) + Y => (X + Y) - A
852 // (X - A) - Y => (X - Y) - A
853 // REASSOC_AX_YB:
854 // Y + (A + X) => (Y + X) + A
855 // Y - (A + X) => (Y - X) - A
856 // Y + (A - X) => (Y - X) + A
857 // Y - (A - X) => (Y + X) - A
858 // REASSOC_XA_YB:
859 // Y + (X + A) => (Y + X) + A
860 // Y - (X + A) => (Y - X) - A
861 // Y + (X - A) => (Y + X) - A
862 // Y - (X - A) => (Y - X) + A
863 switch (Pattern) {
864 default:
865 llvm_unreachable("Unexpected pattern")::llvm::llvm_unreachable_internal("Unexpected pattern", "llvm/lib/CodeGen/TargetInstrInfo.cpp"
, 865)
;
866 case MachineCombinerPattern::REASSOC_AX_BY:
867 if (!AssocCommutRoot && AssocCommutPrev)
868 return {AssocCommutOpcode, InverseOpcode};
869 if (AssocCommutRoot && !AssocCommutPrev)
870 return {InverseOpcode, InverseOpcode};
871 if (!AssocCommutRoot && !AssocCommutPrev)
872 return {InverseOpcode, AssocCommutOpcode};
873 break;
874 case MachineCombinerPattern::REASSOC_XA_BY:
875 if (!AssocCommutRoot && AssocCommutPrev)
876 return {AssocCommutOpcode, InverseOpcode};
877 if (AssocCommutRoot && !AssocCommutPrev)
878 return {InverseOpcode, AssocCommutOpcode};
879 if (!AssocCommutRoot && !AssocCommutPrev)
880 return {InverseOpcode, InverseOpcode};
881 break;
882 case MachineCombinerPattern::REASSOC_AX_YB:
883 if (!AssocCommutRoot && AssocCommutPrev)
884 return {InverseOpcode, InverseOpcode};
885 if (AssocCommutRoot && !AssocCommutPrev)
886 return {AssocCommutOpcode, InverseOpcode};
887 if (!AssocCommutRoot && !AssocCommutPrev)
888 return {InverseOpcode, AssocCommutOpcode};
889 break;
890 case MachineCombinerPattern::REASSOC_XA_YB:
891 if (!AssocCommutRoot && AssocCommutPrev)
892 return {InverseOpcode, InverseOpcode};
893 if (AssocCommutRoot && !AssocCommutPrev)
894 return {InverseOpcode, AssocCommutOpcode};
895 if (!AssocCommutRoot && !AssocCommutPrev)
896 return {AssocCommutOpcode, InverseOpcode};
897 break;
898 }
899 llvm_unreachable("Unhandled combination")::llvm::llvm_unreachable_internal("Unhandled combination", "llvm/lib/CodeGen/TargetInstrInfo.cpp"
, 899)
;
900}
901
902// Return a pair of boolean flags showing if the new root and new prev operands
903// must be swapped. See visual example of the rule in
904// TargetInstrInfo::getReassociationOpcodes.
905static std::pair<bool, bool> mustSwapOperands(MachineCombinerPattern Pattern) {
906 switch (Pattern) {
907 default:
908 llvm_unreachable("Unexpected pattern")::llvm::llvm_unreachable_internal("Unexpected pattern", "llvm/lib/CodeGen/TargetInstrInfo.cpp"
, 908)
;
909 case MachineCombinerPattern::REASSOC_AX_BY:
910 return {false, false};
911 case MachineCombinerPattern::REASSOC_XA_BY:
912 return {true, false};
913 case MachineCombinerPattern::REASSOC_AX_YB:
914 return {true, true};
915 case MachineCombinerPattern::REASSOC_XA_YB:
916 return {true, true};
917 }
918}
919
920/// Attempt the reassociation transformation to reduce critical path length.
921/// See the above comments before getMachineCombinerPatterns().
922void TargetInstrInfo::reassociateOps(
923 MachineInstr &Root, MachineInstr &Prev,
924 MachineCombinerPattern Pattern,
925 SmallVectorImpl<MachineInstr *> &InsInstrs,
926 SmallVectorImpl<MachineInstr *> &DelInstrs,
927 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
928 MachineFunction *MF = Root.getMF();
929 MachineRegisterInfo &MRI = MF->getRegInfo();
930 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
931 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
932 const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
933
934 // This array encodes the operand index for each parameter because the
935 // operands may be commuted. Each row corresponds to a pattern value,
936 // and each column specifies the index of A, B, X, Y.
937 unsigned OpIdx[4][4] = {
938 { 1, 1, 2, 2 },
939 { 1, 2, 2, 1 },
940 { 2, 1, 1, 2 },
941 { 2, 2, 1, 1 }
942 };
943
944 int Row;
945 switch (Pattern) {
946 case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
947 case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
948 case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
949 case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
950 default: llvm_unreachable("unexpected MachineCombinerPattern");
951 }
952
953 MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
954 MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
955 MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
956 MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
957 MachineOperand &OpC = Root.getOperand(0);
958
959 Register RegA = OpA.getReg();
960 Register RegB = OpB.getReg();
961 Register RegX = OpX.getReg();
962 Register RegY = OpY.getReg();
963 Register RegC = OpC.getReg();
964
965 if (RegA.isVirtual())
966 MRI.constrainRegClass(RegA, RC);
967 if (RegB.isVirtual())
968 MRI.constrainRegClass(RegB, RC);
969 if (RegX.isVirtual())
970 MRI.constrainRegClass(RegX, RC);
971 if (RegY.isVirtual())
972 MRI.constrainRegClass(RegY, RC);
973 if (RegC.isVirtual())
974 MRI.constrainRegClass(RegC, RC);
975
976 // Create a new virtual register for the result of (X op Y) instead of
977 // recycling RegB because the MachineCombiner's computation of the critical
978 // path requires a new register definition rather than an existing one.
979 Register NewVR = MRI.createVirtualRegister(RC);
980 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
981
982 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
983 bool KillA = OpA.isKill();
984 bool KillX = OpX.isKill();
985 bool KillY = OpY.isKill();
986 bool KillNewVR = true;
987
988 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
989
990 if (SwapPrevOperands) {
991 std::swap(RegX, RegY);
992 std::swap(KillX, KillY);
993 }
994
995 // Create new instructions for insertion.
996 MachineInstrBuilder MIB1 =
997 BuildMI(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR)
998 .addReg(RegX, getKillRegState(KillX))
999 .addReg(RegY, getKillRegState(KillY))
1000 .setMIFlags(Prev.getFlags());
1001
1002 if (SwapRootOperands) {
1003 std::swap(RegA, NewVR);
1004 std::swap(KillA, KillNewVR);
1005 }
1006
1007 MachineInstrBuilder MIB2 =
1008 BuildMI(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC)
1009 .addReg(RegA, getKillRegState(KillA))
1010 .addReg(NewVR, getKillRegState(KillNewVR))
1011 .setMIFlags(Root.getFlags());
1012
1013 setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1014
1015 // Record new instructions for insertion and old instructions for deletion.
1016 InsInstrs.push_back(MIB1);
1017 InsInstrs.push_back(MIB2);
1018 DelInstrs.push_back(&Prev);
1019 DelInstrs.push_back(&Root);
1020}
1021
1022void TargetInstrInfo::genAlternativeCodeSequence(
1023 MachineInstr &Root, MachineCombinerPattern Pattern,
1024 SmallVectorImpl<MachineInstr *> &InsInstrs,
1025 SmallVectorImpl<MachineInstr *> &DelInstrs,
1026 DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
1027 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1028
1029 // Select the previous instruction in the sequence based on the input pattern.
1030 MachineInstr *Prev = nullptr;
1. 'Prev' initialized to a null pointer value
1031 switch (Pattern) {
2. Control jumps to the 'default' case at line 1040
1032 case MachineCombinerPattern::REASSOC_AX_BY:
1033 case MachineCombinerPattern::REASSOC_XA_BY:
1034 Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
1035 break;
1036 case MachineCombinerPattern::REASSOC_AX_YB:
1037 case MachineCombinerPattern::REASSOC_XA_YB:
1038 Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
1039 break;
1040 default:
1041 break;
1042 }
1043
1044 // Don't reassociate if Prev and Root are in different blocks.
1045 if (Prev->getParent() != Root.getParent())
3. Execution continues on line 1045
4. Called C++ object pointer is null
1046 return;
1047
1048 assert(Prev && "Unknown pattern for machine combiner");
1049
1050 reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
1051}
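The defect flagged above is that the Prev->getParent() call at line 1045 runs before the assert at line 1048, so any pattern that falls into the default case dereferences a null Prev. A minimal sketch of one way to address it (a hypothetical reordering for illustration, not necessarily the fix applied upstream):

// Sketch: check Prev before the first dereference instead of after it.
assert(Prev && "Unknown pattern for machine combiner");
if (!Prev)
  return; // keeps release builds (where the assert compiles away) from crashing

// Don't reassociate if Prev and Root are in different blocks.
if (Prev->getParent() != Root.getParent())
  return;

reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);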
1052
1053MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
1054 return MachineTraceStrategy::TS_MinInstrCount;
1055}
1056
1057bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
1058 const MachineInstr &MI) const {
1059 const MachineFunction &MF = *MI.getMF();
1060 const MachineRegisterInfo &MRI = MF.getRegInfo();
1061
1062 // Remat clients assume operand 0 is the defined register.
1063 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1064 return false;
1065 Register DefReg = MI.getOperand(0).getReg();
1066
1067 // A sub-register definition can only be rematerialized if the instruction
1068 // doesn't read the other parts of the register. Otherwise it is really a
1069 // read-modify-write operation on the full virtual register which cannot be
1070 // moved safely.
1071 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1072 MI.readsVirtualRegister(DefReg))
1073 return false;
1074
1075 // A load from a fixed stack slot can be rematerialized. This may be
1076 // redundant with subsequent checks, but it's target-independent,
1077 // simple, and a common case.
1078 int FrameIdx = 0;
1079 if (isLoadFromStackSlot(MI, FrameIdx) &&
1080 MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1081 return true;
1082
1083 // Avoid instructions obviously unsafe for remat.
1084 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1085 MI.hasUnmodeledSideEffects())
1086 return false;
1087
1088 // Don't remat inline asm. We have no idea how expensive it is
1089 // even if it's side effect free.
1090 if (MI.isInlineAsm())
1091 return false;
1092
1093 // Avoid instructions which load from potentially varying memory.
1094 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1095 return false;
1096
1097 // If any of the registers accessed are non-constant, conservatively assume
1098 // the instruction is not rematerializable.
1099 for (const MachineOperand &MO : MI.operands()) {
1100 if (!MO.isReg()) continue;
1101 Register Reg = MO.getReg();
1102 if (Reg == 0)
1103 continue;
1104
1105 // Check for a well-behaved physical register.
1106 if (Reg.isPhysical()) {
1107 if (MO.isUse()) {
1108 // If the physreg has no defs anywhere, it's just an ambient register
1109 // and we can freely move its uses. Alternatively, if it's allocatable,
1110 // it could get allocated to something with a def during allocation.
1111 if (!MRI.isConstantPhysReg(Reg))
1112 return false;
1113 } else {
1114 // A physreg def. We can't remat it.
1115 return false;
1116 }
1117 continue;
1118 }
1119
1120 // Only allow one virtual-register def. There may be multiple defs of the
1121 // same virtual register, though.
1122 if (MO.isDef() && Reg != DefReg)
1123 return false;
1124
1125 // Don't allow any virtual-register uses. Rematting an instruction with
1126 // virtual register uses would lengthen the live ranges of the uses, which
1127 // is not necessarily a good idea, certainly not "trivial".
1128 if (MO.isUse())
1129 return false;
1130 }
1131
1132 // Everything checked out.
1133 return true;
1134}
1135
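
To make the intent of the checks above concrete, here is a small illustration (hypothetical, x86-flavoured MIR in the style of the DIV64m example quoted further down; not taken from a real test):

  //   %0:gr32 = MOV32ri 42
  // defines a single virtual register, has no register uses and no side
  // effects, so the generic check reports it as trivially rematerializable.
  //   %1:gr32 = COPY %2:gr32
  // reaches the operand loop but is rejected there, because %2 is a
  // virtual-register use and rematting the copy would lengthen %2's live range.
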
1136int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
1137 const MachineFunction *MF = MI.getMF();
1138 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
1139 bool StackGrowsDown =
1140 TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
1141
1142 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1143 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1144
1145 if (!isFrameInstr(MI))
1146 return 0;
1147
1148 int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1149
1150 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1151 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1152 SPAdj = -SPAdj;
1153
1154 return SPAdj;
1155}
1156
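
A quick worked example of the sign convention above (assumed values; the pseudo-opcode names are the usual x86-style call-frame pseudos, not anything defined in this file):

  // Stack grows down, call-frame size 32 bytes, stack alignment 16:
  //   ADJCALLSTACKDOWN 32, ...  (FrameSetupOpcode)   -> getSPAdjust() == +32
  //   ADJCALLSTACKUP   32, ...  (FrameDestroyOpcode) -> getSPAdjust() == -32
  // i.e. a positive result means the stack grows by 32 bytes at this
  // instruction, and later stack-slot references must account for it.
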
1157/// isSchedulingBoundary - Test if the given instruction should be
1158/// considered a scheduling boundary. This primarily includes labels
1159/// and terminators.
1160bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1161 const MachineBasicBlock *MBB,
1162 const MachineFunction &MF) const {
1163 // Terminators and labels can't be scheduled around.
1164 if (MI.isTerminator() || MI.isPosition())
1165 return true;
1166
1167 // INLINEASM_BR can jump to another block
1168 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1169 return true;
1170
1171 // Don't attempt to schedule around any instruction that defines
1172 // a stack-oriented pointer, as it's unlikely to be profitable. This
1173 // saves compile time, because it doesn't require every single
1174 // stack slot reference to depend on the instruction that does the
1175 // modification.
1176 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1177 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1178 return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
1179}
1180
1181// Provide a global flag for disabling the PreRA hazard recognizer that targets
1182// may choose to honor.
1183bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1184 return !DisableHazardRecognizer;
1185}
1186
1187// Default implementation of CreateTargetRAHazardRecognizer.
1188ScheduleHazardRecognizer *TargetInstrInfo::
1189CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1190 const ScheduleDAG *DAG) const {
1191 // Dummy hazard recognizer allows all instructions to issue.
1192 return new ScheduleHazardRecognizer();
1193}
1194
1195// Default implementation of CreateTargetMIHazardRecognizer.
1196ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
1197 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1198 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1199}
1200
1201// Default implementation of CreateTargetPostRAHazardRecognizer.
1202ScheduleHazardRecognizer *TargetInstrInfo::
1203CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1204 const ScheduleDAG *DAG) const {
1205 return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1206}
1207
1208// Default implementation of getMemOperandWithOffset.
1209bool TargetInstrInfo::getMemOperandWithOffset(
1210 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1211 bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
1212 SmallVector<const MachineOperand *, 4> BaseOps;
1213 unsigned Width;
1214 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1215 Width, TRI) ||
1216 BaseOps.size() != 1)
1217 return false;
1218 BaseOp = BaseOps.front();
1219 return true;
1220}
1221
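
A minimal caller-side sketch of this hook (hypothetical surrounding code; TII, MI and TRI are assumed to be in scope, mirroring the use in describeLoadedValue below):

  const MachineOperand *BaseOp = nullptr;
  int64_t Offset = 0;
  bool OffsetIsScalable = false;
  if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI) &&
      !OffsetIsScalable) {
    // MI's memory address is representable as *BaseOp + Offset bytes; targets
    // that report zero or several base operands fall out of the if above.
  }
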
1222//===----------------------------------------------------------------------===//
1223// SelectionDAG latency interface.
1224//===----------------------------------------------------------------------===//
1225
1226int
1227TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1228 SDNode *DefNode, unsigned DefIdx,
1229 SDNode *UseNode, unsigned UseIdx) const {
1230 if (!ItinData || ItinData->isEmpty())
1231 return -1;
1232
1233 if (!DefNode->isMachineOpcode())
1234 return -1;
1235
1236 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1237 if (!UseNode->isMachineOpcode())
1238 return ItinData->getOperandCycle(DefClass, DefIdx);
1239 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1240 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1241}
1242
1243int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1244 SDNode *N) const {
1245 if (!ItinData || ItinData->isEmpty())
1246 return 1;
1247
1248 if (!N->isMachineOpcode())
1249 return 1;
1250
1251 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1252}
1253
1254//===----------------------------------------------------------------------===//
1255// MachineInstr latency interface.
1256//===----------------------------------------------------------------------===//
1257
1258unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1259 const MachineInstr &MI) const {
1260 if (!ItinData || ItinData->isEmpty())
1261 return 1;
1262
1263 unsigned Class = MI.getDesc().getSchedClass();
1264 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1265 if (UOps >= 0)
1266 return UOps;
1267
1268 // The # of u-ops is dynamically determined. The specific target should
1269 // override this function to return the right number.
1270 return 1;
1271}
1272
1273 /// Return the default expected latency for a def based on its opcode.
1274unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1275 const MachineInstr &DefMI) const {
1276 if (DefMI.isTransient())
1277 return 0;
1278 if (DefMI.mayLoad())
1279 return SchedModel.LoadLatency;
1280 if (isHighLatencyDef(DefMI.getOpcode()))
1281 return SchedModel.HighLatency;
1282 return 1;
1283}
1284
1285unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1286 return 0;
1287}
1288
1289unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1290 const MachineInstr &MI,
1291 unsigned *PredCost) const {
1292 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1293 // still have a MinLatency property, which getStageLatency checks.
1294 if (!ItinData)
1295 return MI.mayLoad() ? 2 : 1;
1296
1297 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1298}
1299
1300bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1301 const MachineInstr &DefMI,
1302 unsigned DefIdx) const {
1303 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1304 if (!ItinData || ItinData->isEmpty())
1305 return false;
1306
1307 unsigned DefClass = DefMI.getDesc().getSchedClass();
1308 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
1309 return (DefCycle != -1 && DefCycle <= 1);
1310}
1311
1312std::optional<ParamLoadedValue>
1313TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1314 Register Reg) const {
1315 const MachineFunction *MF = MI.getMF();
1316 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1317 DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1318 int64_t Offset;
1319 bool OffsetIsScalable;
1320
1321 // To simplify the sub-register handling, verify that we only need to
1322 // consider physical registers.
1323   assert(MF->getProperties().hasProperty(
1324       MachineFunctionProperties::Property::NoVRegs));
1325
1326 if (auto DestSrc = isCopyInstr(MI)) {
1327 Register DestReg = DestSrc->Destination->getReg();
1328
1329 // If the copy destination is the forwarding reg, describe the forwarding
1330 // reg using the copy source as the backup location. Example:
1331 //
1332 // x0 = MOV x7
1333 // call callee(x0) ; x0 described as x7
1334 if (Reg == DestReg)
1335 return ParamLoadedValue(*DestSrc->Source, Expr);
1336
1337 // Cases where super- or sub-registers need to be described should
1338 // be handled by the target's hook implementation.
1339     assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
1340            "TargetInstrInfo::describeLoadedValue can't describe super- or "
1341            "sub-regs for copy instructions");
1342 return std::nullopt;
1343 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1344 Register SrcReg = RegImm->Reg;
1345 Offset = RegImm->Imm;
1346 Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1347 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1348 } else if (MI.hasOneMemOperand()) {
1349 // Only describe memory which provably does not escape the function. As
1350 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1351 // callee (or by another thread).
1352 const auto &TII = MF->getSubtarget().getInstrInfo();
1353 const MachineFrameInfo &MFI = MF->getFrameInfo();
1354 const MachineMemOperand *MMO = MI.memoperands()[0];
1355 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1356
1357 // If the address points to "special" memory (e.g. a spill slot), it's
1358 // sufficient to check that it isn't aliased by any high-level IR value.
1359 if (!PSV || PSV->mayAlias(&MFI))
1360 return std::nullopt;
1361
1362 const MachineOperand *BaseOp;
1363 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1364 TRI))
1365 return std::nullopt;
1366
1367 // FIXME: Scalable offsets are not yet handled in the offset code below.
1368 if (OffsetIsScalable)
1369 return std::nullopt;
1370
1371 // TODO: Can currently only handle mem instructions with a single define.
1372 // An example from the x86 target:
1373 // ...
1374 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1375 // ...
1376 //
1377 if (MI.getNumExplicitDefs() != 1)
1378 return std::nullopt;
1379
1380 // TODO: In what way do we need to take Reg into consideration here?
1381
1382 SmallVector<uint64_t, 8> Ops;
1383 DIExpression::appendOffset(Ops, Offset);
1384 Ops.push_back(dwarf::DW_OP_deref_size);
1385 Ops.push_back(MMO->getSize());
1386 Expr = DIExpression::prependOpcodes(Expr, Ops);
1387 return ParamLoadedValue(*BaseOp, Expr);
1388 }
1389
1390 return std::nullopt;
1391}
1392
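
For the add-immediate branch above, the returned DIExpression is a plain offset from the source register; an illustration with assumed values (not taken from a real test, and the exact operator chosen by DIExpression::prepend is an assumption here):

  // Assuming isAddImmediate(MI, Reg) recognized "Reg = SrcReg + 16":
  //   location  : register SrcReg (as a non-def MachineOperand)
  //   expression: !DIExpression(DW_OP_plus_uconst, 16)
  // so the consumer reads the parameter as "value of SrcReg, plus 16".
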
1393/// Both DefMI and UseMI must be valid. By default, call directly to the
1394 /// itinerary. This may be overridden by the target.
1395int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1396 const MachineInstr &DefMI,
1397 unsigned DefIdx,
1398 const MachineInstr &UseMI,
1399 unsigned UseIdx) const {
1400 unsigned DefClass = DefMI.getDesc().getSchedClass();
1401 unsigned UseClass = UseMI.getDesc().getSchedClass();
1402 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1403}
1404
1405bool TargetInstrInfo::getRegSequenceInputs(
1406 const MachineInstr &MI, unsigned DefIdx,
1407 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1408   assert((MI.isRegSequence() ||
1409           MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1410
1411 if (!MI.isRegSequence())
1412 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1413
1414 // We are looking at:
1415 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1416   assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1417 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1418 OpIdx += 2) {
1419 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1420 if (MOReg.isUndef())
1421 continue;
1422 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1423     assert(MOSubIdx.isImm() &&
1424            "One of the subindex of the reg_sequence is not an immediate");
1425 // Record Reg:SubReg, SubIdx.
1426 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1427 (unsigned)MOSubIdx.getImm()));
1428 }
1429 return true;
1430}
1431
1432bool TargetInstrInfo::getExtractSubregInputs(
1433 const MachineInstr &MI, unsigned DefIdx,
1434 RegSubRegPairAndIdx &InputReg) const {
1435   assert((MI.isExtractSubreg() ||
1436           MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1437
1438 if (!MI.isExtractSubreg())
1439 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1440
1441 // We are looking at:
1442 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1443   assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1444 const MachineOperand &MOReg = MI.getOperand(1);
1445 if (MOReg.isUndef())
1446 return false;
1447 const MachineOperand &MOSubIdx = MI.getOperand(2);
1448   assert(MOSubIdx.isImm() &&
1449          "The subindex of the extract_subreg is not an immediate");
1450
1451 InputReg.Reg = MOReg.getReg();
1452 InputReg.SubReg = MOReg.getSubReg();
1453 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1454 return true;
1455}
1456
1457bool TargetInstrInfo::getInsertSubregInputs(
1458 const MachineInstr &MI, unsigned DefIdx,
1459 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1460   assert((MI.isInsertSubreg() ||
1461           MI.isInsertSubregLike()) && "Instruction do not have the proper type");
1462
1463 if (!MI.isInsertSubreg())
1464 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1465
1466 // We are looking at:
1467 // Def = INSERT_SEQUENCE v0, v1, sub0.
1468   assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1469 const MachineOperand &MOBaseReg = MI.getOperand(1);
1470 const MachineOperand &MOInsertedReg = MI.getOperand(2);
1471 if (MOInsertedReg.isUndef())
1472 return false;
1473 const MachineOperand &MOSubIdx = MI.getOperand(3);
1474   assert(MOSubIdx.isImm() &&
1475          "One of the subindex of the reg_sequence is not an immediate");
1476 BaseReg.Reg = MOBaseReg.getReg();
1477 BaseReg.SubReg = MOBaseReg.getSubReg();
1478
1479 InsertedReg.Reg = MOInsertedReg.getReg();
1480 InsertedReg.SubReg = MOInsertedReg.getSubReg();
1481 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1482 return true;
1483}
1484
1485// Returns a MIRPrinter comment for this machine operand.
1486std::string TargetInstrInfo::createMIROperandComment(
1487 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
1488 const TargetRegisterInfo *TRI) const {
1489
1490 if (!MI.isInlineAsm())
1491 return "";
1492
1493 std::string Flags;
1494 raw_string_ostream OS(Flags);
1495
1496 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
1497 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1498 unsigned ExtraInfo = Op.getImm();
1499 bool First = true;
1500 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
1501 if (!First)
1502 OS << " ";
1503 First = false;
1504 OS << Info;
1505 }
1506
1507 return OS.str();
1508 }
1509
1510 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
1511 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
1512 return "";
1513
1514   assert(Op.isImm() && "Expected flag operand to be an immediate");
1515 // Pretty print the inline asm operand descriptor.
1516 unsigned Flag = Op.getImm();
1517 unsigned Kind = InlineAsm::getKind(Flag);
1518 OS << InlineAsm::getKindName(Kind);
1519
1520 unsigned RCID = 0;
1521 if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1522 InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1523 if (TRI) {
1524 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1525 } else
1526 OS << ":RC" << RCID;
1527 }
1528
1529 if (InlineAsm::isMemKind(Flag)) {
1530 unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1531 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1532 }
1533
1534 unsigned TiedTo = 0;
1535 if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1536 OS << " tiedto:$" << TiedTo;
1537
1538 return OS.str();
1539}
1540
1541TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
1542
1543void TargetInstrInfo::mergeOutliningCandidateAttributes(
1544 Function &F, std::vector<outliner::Candidate> &Candidates) const {
1545 // Include target features from an arbitrary candidate for the outlined
1546 // function. This makes sure the outlined function knows what kinds of
1547 // instructions are going into it. This is fine, since all parent functions
1548 // must necessarily support the instructions that are in the outlined region.
1549 outliner::Candidate &FirstCand = Candidates.front();
1550 const Function &ParentFn = FirstCand.getMF()->getFunction();
1551 if (ParentFn.hasFnAttribute("target-features"))
1552 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
1553 if (ParentFn.hasFnAttribute("target-cpu"))
1554 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
1555
1556 // Set nounwind, so we don't generate eh_frame.
1557 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
1558 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
1559 }))
1560 F.addFnAttr(Attribute::NoUnwind);
1561}
1562
1563outliner::InstrType TargetInstrInfo::getOutliningType(
1564 MachineBasicBlock::iterator &MIT, unsigned Flags) const {
1565 MachineInstr &MI = *MIT;
1566
1567 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
1568 // have support for outlining those. Special-case that here.
1569 if (MI.isCFIInstruction())
1570 // Just go right to the target implementation.
1571 return getOutliningTypeImpl(MIT, Flags);
1572
1573 // Don't allow instructions that don't materialize to impact analysis.
1574 if (MI.isMetaInstruction())
1575 return outliner::InstrType::Invisible;
1576
1577 // Be conservative about inline assembly.
1578 if (MI.isInlineAsm())
1579 return outliner::InstrType::Illegal;
1580
1581 // Labels generally can't safely be outlined.
1582 if (MI.isLabel())
1583 return outliner::InstrType::Illegal;
1584
1585 // Is this a terminator for a basic block?
1586 if (MI.isTerminator()) {
1587 // If this is a branch to another block, we can't outline it.
1588 if (!MI.getParent()->succ_empty())
1589 return outliner::InstrType::Illegal;
1590
1591 // Don't outline if the branch is not unconditional.
1592 if (isPredicated(MI))
1593 return outliner::InstrType::Illegal;
1594 }
1595
1596 // Make sure none of the operands of this instruction do anything that
1597 // might break if they're moved outside their current function.
1598 // This includes MachineBasicBlock references, BlockAddresses,
1599 // Constant pool indices and jump table indices.
1600 //
1601 // A quick note on MO_TargetIndex:
1602 // This doesn't seem to be used in any of the architectures that the
1603 // MachineOutliner supports, but it was still filtered out in all of them.
1604 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
1605 // As such, this check is removed both here and in the target-specific
1606 // implementations. Instead, we assert to make sure this doesn't
1607 // catch anyone off-guard somewhere down the line.
1608 for (const MachineOperand &MOP : MI.operands()) {
1609 // If you hit this assertion, please remove it and adjust
1610 // `getOutliningTypeImpl` for your target appropriately if necessary.
1611 // Adding the assertion back to other supported architectures
1612 // would be nice too :)
1613     assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
1614
1615 // CFI instructions should already have been filtered out at this point.
1616     assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
1617
1618 // PrologEpilogInserter should've already run at this point.
1619     assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
1620
1621 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
1622 return outliner::InstrType::Illegal;
1623 }
1624
1625 // If we don't know, delegate to the target-specific hook.
1626 return getOutliningTypeImpl(MIT, Flags);
1627}
1628
1629bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
1630 unsigned &Flags) const {
1631 // Some instrumentations create a special TargetOpcode at the start which
1632 // expands to special code sequences that must be present.
1633 auto First = MBB.getFirstNonDebugInstr();
1634 if (First != MBB.end() &&
1635 (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
1636 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER))
1637 return false;
1638
1639 return true;
1640}