//=== lib/CodeGen/GlobalISel/AMDGPUCombinerHelper.cpp ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUCombinerHelper.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
using namespace MIPatternMatch;

LLVM_READNONE
static bool fnegFoldsIntoMI(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::G_FADD:
  case AMDGPU::G_FSUB:
  case AMDGPU::G_FMUL:
  case AMDGPU::G_FMA:
  case AMDGPU::G_FMAD:
  case AMDGPU::G_FMINNUM:
  case AMDGPU::G_FMAXNUM:
  case AMDGPU::G_FMINNUM_IEEE:
  case AMDGPU::G_FMAXNUM_IEEE:
  case AMDGPU::G_FSIN:
  case AMDGPU::G_FPEXT:
  case AMDGPU::G_INTRINSIC_TRUNC:
  case AMDGPU::G_FPTRUNC:
  case AMDGPU::G_FRINT:
  case AMDGPU::G_FNEARBYINT:
  case AMDGPU::G_INTRINSIC_ROUND:
  case AMDGPU::G_INTRINSIC_ROUNDEVEN:
  case AMDGPU::G_FCANONICALIZE:
  case AMDGPU::G_AMDGPU_RCP_IFLAG:
  case AMDGPU::G_AMDGPU_FMIN_LEGACY:
  case AMDGPU::G_AMDGPU_FMAX_LEGACY:
    return true;
  case AMDGPU::G_INTRINSIC: {
    unsigned IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_rcp:
    case Intrinsic::amdgcn_rcp_legacy:
    case Intrinsic::amdgcn_sin:
    case Intrinsic::amdgcn_fmul_legacy:
    case Intrinsic::amdgcn_fmed3:
    case Intrinsic::amdgcn_fma_legacy:
      return true;
    default:
      return false;
    }
  }
  default:
    return false;
  }
}

/// Returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  return MI.getNumOperands() > (isa<GIntrinsic>(MI) ? 4u : 3u) ||
         MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits() == 64;
}

// Most FP instructions support source modifiers.
LLVM_READONLY
static bool hasSourceMods(const MachineInstr &MI) {
  if (!MI.memoperands().empty())
    return false;

  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::G_SELECT:
  case AMDGPU::G_FDIV:
  case AMDGPU::G_FREM:
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR:
  case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS:
  case AMDGPU::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
  case AMDGPU::G_BITCAST:
  case AMDGPU::G_ANYEXT:
  case AMDGPU::G_BUILD_VECTOR:
  case AMDGPU::G_BUILD_VECTOR_TRUNC:
  case AMDGPU::G_PHI:
    return false;
  case AMDGPU::G_INTRINSIC:
  case AMDGPU::G_INTRINSIC_CONVERGENT: {
    unsigned IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_interp_p1:
    case Intrinsic::amdgcn_interp_p2:
    case Intrinsic::amdgcn_interp_mov:
    case Intrinsic::amdgcn_interp_p1_f16:
    case Intrinsic::amdgcn_interp_p2_f16:
    case Intrinsic::amdgcn_div_scale:
      return false;
    default:
      return true;
    }
  }
  default:
    return true;
  }
}

static bool allUsesHaveSourceMods(MachineInstr &MI, MachineRegisterInfo &MRI,
                                  unsigned CostThreshold = 4) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // using a source modifier there is truly free. If there are multiple users,
  // and folding the modifier into each of them would force a VOP3 encoding,
  // there will be a code size increase. Try to avoid increasing code size
  // unless we know it will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  Register Dst = MI.getOperand(0).getReg();
  for (const MachineInstr &Use : MRI.use_nodbg_instructions(Dst)) {
    if (!hasSourceMods(Use))
      return false;

    if (!opMustUseVOP3Encoding(Use, MRI)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }
  return true;
}
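
// Note: neg/abs source modifiers are generally only encodable in the 64-bit
// VOP3 form of a VALU instruction, so attaching a modifier to a user that
// could otherwise have used a 32-bit encoding grows that instruction. This is
// the code-size cost that CostThreshold above bounds.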

static bool mayIgnoreSignedZero(MachineInstr &MI) {
  const TargetOptions &Options = MI.getMF()->getTarget().Options;
  return Options.NoSignedZerosFPMath || MI.getFlag(MachineInstr::MIFlag::FmNsz);
}

static bool isInv2Pi(const APFloat &APF) {
  static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
  static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
  static const APFloat KF64(APFloat::IEEEdouble(),
                            APInt(64, 0x3fc45f306dc9c882));

  return APF.bitwiseIsEqual(KF16) || APF.bitwiseIsEqual(KF32) ||
         APF.bitwiseIsEqual(KF64);
}
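
// Note: the three bit patterns above are 1.0 / (2.0 * pi) ~= 0.15915494 in
// IEEE half, single and double precision respectively; some subtargets can
// encode this value as an inline immediate (see hasInv2PiInlineImm() below).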

// 0 and 1.0 / (0.5 * pi) do not have inline immediates, so there is an
// additional cost to negate them.
static bool isConstantCostlierToNegate(MachineInstr &MI, Register Reg,
                                       MachineRegisterInfo &MRI) {
  std::optional<FPValueAndVReg> FPValReg;
  if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) {
    if (FPValReg->Value.isZero() && !FPValReg->Value.isNegative())
      return true;

    const GCNSubtarget &ST = MI.getMF()->getSubtarget<GCNSubtarget>();
    if (ST.hasInv2PiInlineImm() && isInv2Pi(FPValReg->Value))
      return true;
  }
  return false;
}

static unsigned inverseMinMax(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::G_FMAXNUM:
    return AMDGPU::G_FMINNUM;
  case AMDGPU::G_FMINNUM:
    return AMDGPU::G_FMAXNUM;
  case AMDGPU::G_FMAXNUM_IEEE:
    return AMDGPU::G_FMINNUM_IEEE;
  case AMDGPU::G_FMINNUM_IEEE:
    return AMDGPU::G_FMAXNUM_IEEE;
  case AMDGPU::G_AMDGPU_FMAX_LEGACY:
    return AMDGPU::G_AMDGPU_FMIN_LEGACY;
  case AMDGPU::G_AMDGPU_FMIN_LEGACY:
    return AMDGPU::G_AMDGPU_FMAX_LEGACY;
  default:
    llvm_unreachable("invalid min/max opcode");
  }
}
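
// Note: flipping min and max is what keeps the fneg fold correct for these
// opcodes, since -max(a, b) == min(-a, -b) and -min(a, b) == max(-a, -b);
// applyFoldableFneg below negates both operands and then swaps the opcode via
// inverseMinMax.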

bool AMDGPUCombinerHelper::matchFoldableFneg(MachineInstr &MI,
                                             MachineInstr *&MatchInfo) {
  Register Src = MI.getOperand(1).getReg();
  MatchInfo = MRI.getVRegDef(Src);

  // If the input has multiple uses and we can either fold the negate down, or
  // the other uses cannot, give up. This both prevents unprofitable
  // transformations and infinite loops: we won't repeatedly try to fold around
  // a negate that has no 'good' form.
  if (MRI.hasOneNonDBGUse(Src)) {
    if (allUsesHaveSourceMods(MI, MRI, 0))
      return false;
  } else {
    if (fnegFoldsIntoMI(*MatchInfo) &&
        (allUsesHaveSourceMods(MI, MRI) ||
         !allUsesHaveSourceMods(*MatchInfo, MRI)))
      return false;
  }

  switch (MatchInfo->getOpcode()) {
  case AMDGPU::G_FMINNUM:
  case AMDGPU::G_FMAXNUM:
  case AMDGPU::G_FMINNUM_IEEE:
  case AMDGPU::G_FMAXNUM_IEEE:
  case AMDGPU::G_AMDGPU_FMIN_LEGACY:
  case AMDGPU::G_AMDGPU_FMAX_LEGACY:
    // 0 doesn't have a negated inline immediate.
    return !isConstantCostlierToNegate(*MatchInfo,
                                       MatchInfo->getOperand(2).getReg(), MRI);
  case AMDGPU::G_FADD:
  case AMDGPU::G_FSUB:
  case AMDGPU::G_FMA:
  case AMDGPU::G_FMAD:
    return mayIgnoreSignedZero(*MatchInfo);
  case AMDGPU::G_FMUL:
  case AMDGPU::G_FPEXT:
  case AMDGPU::G_INTRINSIC_TRUNC:
  case AMDGPU::G_FPTRUNC:
  case AMDGPU::G_FRINT:
  case AMDGPU::G_FNEARBYINT:
  case AMDGPU::G_INTRINSIC_ROUND:
  case AMDGPU::G_INTRINSIC_ROUNDEVEN:
  case AMDGPU::G_FSIN:
  case AMDGPU::G_FCANONICALIZE:
  case AMDGPU::G_AMDGPU_RCP_IFLAG:
    return true;
  case AMDGPU::G_INTRINSIC:
  case AMDGPU::G_INTRINSIC_CONVERGENT: {
    unsigned IntrinsicID = cast<GIntrinsic>(MatchInfo)->getIntrinsicID();
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_rcp:
    case Intrinsic::amdgcn_rcp_legacy:
    case Intrinsic::amdgcn_sin:
    case Intrinsic::amdgcn_fmul_legacy:
    case Intrinsic::amdgcn_fmed3:
      return true;
    case Intrinsic::amdgcn_fma_legacy:
      return mayIgnoreSignedZero(*MatchInfo);
    default:
      return false;
    }
  }
  default:
    return false;
  }
}

void AMDGPUCombinerHelper::applyFoldableFneg(MachineInstr &MI,
                                             MachineInstr *&MatchInfo) {
  // Transform:
  // %A = inst %Op1, ...
  // %B = fneg %A
  //
  // into:
  //
  // (if %A has one use, specifically fneg above)
  // %B = inst (maybe fneg %Op1), ...
  //
  // (if %A has multiple uses)
  // %B = inst (maybe fneg %Op1), ...
  // %A = fneg %B

  // Replace register in operand with a register holding negated value.
  auto NegateOperand = [&](MachineOperand &Op) {
    Register Reg = Op.getReg();
    if (!mi_match(Reg, MRI, m_GFNeg(m_Reg(Reg))))
      Reg = Builder.buildFNeg(MRI.getType(Reg), Reg).getReg(0);
    replaceRegOpWith(MRI, Op, Reg);
  };

  // Replace either register in operands with a register holding negated value.
  auto NegateEitherOperand = [&](MachineOperand &X, MachineOperand &Y) {
    Register XReg = X.getReg();
    Register YReg = Y.getReg();
    if (mi_match(XReg, MRI, m_GFNeg(m_Reg(XReg))))
      replaceRegOpWith(MRI, X, XReg);
    else if (mi_match(YReg, MRI, m_GFNeg(m_Reg(YReg))))
      replaceRegOpWith(MRI, Y, YReg);
    else {
      YReg = Builder.buildFNeg(MRI.getType(YReg), YReg).getReg(0);
      replaceRegOpWith(MRI, Y, YReg);
    }
  };

  Builder.setInstrAndDebugLoc(*MatchInfo);

  // Negate appropriate operands so that resulting value of MatchInfo is
  // negated.
  switch (MatchInfo->getOpcode()) {
  case AMDGPU::G_FADD:
  case AMDGPU::G_FSUB:
    NegateOperand(MatchInfo->getOperand(1));
    NegateOperand(MatchInfo->getOperand(2));
    break;
  case AMDGPU::G_FMUL:
    NegateEitherOperand(MatchInfo->getOperand(1), MatchInfo->getOperand(2));
    break;
  case AMDGPU::G_FMINNUM:
  case AMDGPU::G_FMAXNUM:
  case AMDGPU::G_FMINNUM_IEEE:
  case AMDGPU::G_FMAXNUM_IEEE:
  case AMDGPU::G_AMDGPU_FMIN_LEGACY:
  case AMDGPU::G_AMDGPU_FMAX_LEGACY: {
    NegateOperand(MatchInfo->getOperand(1));
    NegateOperand(MatchInfo->getOperand(2));
    unsigned Opposite = inverseMinMax(MatchInfo->getOpcode());
    replaceOpcodeWith(*MatchInfo, Opposite);
    break;
  }
  case AMDGPU::G_FMA:
  case AMDGPU::G_FMAD:
    NegateEitherOperand(MatchInfo->getOperand(1), MatchInfo->getOperand(2));
    NegateOperand(MatchInfo->getOperand(3));
    break;
  case AMDGPU::G_FPEXT:
  case AMDGPU::G_INTRINSIC_TRUNC:
  case AMDGPU::G_FRINT:
  case AMDGPU::G_FNEARBYINT:
  case AMDGPU::G_INTRINSIC_ROUND:
  case AMDGPU::G_INTRINSIC_ROUNDEVEN:
  case AMDGPU::G_FSIN:
  case AMDGPU::G_FCANONICALIZE:
  case AMDGPU::G_AMDGPU_RCP_IFLAG:
  case AMDGPU::G_FPTRUNC:
    NegateOperand(MatchInfo->getOperand(1));
    break;
  case AMDGPU::G_INTRINSIC:
  case AMDGPU::G_INTRINSIC_CONVERGENT: {
    unsigned IntrinsicID = cast<GIntrinsic>(MatchInfo)->getIntrinsicID();
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_rcp:
    case Intrinsic::amdgcn_rcp_legacy:
    case Intrinsic::amdgcn_sin:
      NegateOperand(MatchInfo->getOperand(2));
      break;
    case Intrinsic::amdgcn_fmul_legacy:
      NegateEitherOperand(MatchInfo->getOperand(2), MatchInfo->getOperand(3));
      break;
    case Intrinsic::amdgcn_fmed3:
      NegateOperand(MatchInfo->getOperand(2));
      NegateOperand(MatchInfo->getOperand(3));
      NegateOperand(MatchInfo->getOperand(4));
      break;
    case Intrinsic::amdgcn_fma_legacy:
      NegateEitherOperand(MatchInfo->getOperand(2), MatchInfo->getOperand(3));
      NegateOperand(MatchInfo->getOperand(4));
      break;
    default:
      llvm_unreachable("folding fneg not supported for this intrinsic");
    }
    break;
  }
  default:
    llvm_unreachable("folding fneg not supported for this instruction");
  }

  Register Dst = MI.getOperand(0).getReg();
  Register MatchInfoDst = MatchInfo->getOperand(0).getReg();

  if (MRI.hasOneNonDBGUse(MatchInfoDst)) {
    // MatchInfo now has the negated value, so use that instead of the old Dst.
    replaceRegWith(MRI, Dst, MatchInfoDst);
  } else {
    // We want to swap all uses of Dst with uses of MatchInfoDst and vice versa
    // but replaceRegWith will replace defs as well. It is easier to replace one
    // def with a new register.
    LLT Type = MRI.getType(Dst);
    Register NegatedMatchInfo = MRI.createGenericVirtualRegister(Type);
    replaceRegOpWith(MRI, MatchInfo->getOperand(0), NegatedMatchInfo);

    // MatchInfo now has the negated value, so use that instead of the old Dst.
    replaceRegWith(MRI, Dst, NegatedMatchInfo);

    // Recreate the non-negated value for the other uses of the old
    // MatchInfoDst.
    auto NextInst = ++MatchInfo->getIterator();
    Builder.setInstrAndDebugLoc(*NextInst);
    Builder.buildFNeg(MatchInfoDst, NegatedMatchInfo, MI.getFlags());
  }

  MI.eraseFromParent();
}
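
// Illustrative single-use example of the transform above (hypothetical MIR,
// virtual register names made up):
//
//   %a:_(s32) = G_FMUL %x, %y
//   %b:_(s32) = G_FNEG %a          ; %a has no other uses
//
// becomes
//
//   %neg_y:_(s32) = G_FNEG %y
//   %a:_(s32) = G_FMUL %x, %neg_y  ; uses of %b are rewritten to use %a
//
// and the remaining G_FNEG on %y is expected to be folded into the multiply
// as a source modifier during instruction selection.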

// TODO: Should return converted value / extension source and avoid introducing
// intermediate fptruncs in the apply function.
static bool isFPExtFromF16OrConst(const MachineRegisterInfo &MRI,
                                  Register Reg) {
  const MachineInstr *Def = MRI.getVRegDef(Reg);
  if (Def->getOpcode() == TargetOpcode::G_FPEXT) {
    Register SrcReg = Def->getOperand(1).getReg();
    return MRI.getType(SrcReg) == LLT::scalar(16);
  }

  if (Def->getOpcode() == TargetOpcode::G_FCONSTANT) {
    APFloat Val = Def->getOperand(1).getFPImm()->getValueAPF();
    bool LosesInfo = true;
    Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &LosesInfo);
    return !LosesInfo;
  }

  return false;
}

bool AMDGPUCombinerHelper::matchExpandPromotedF16FMed3(MachineInstr &MI,
                                                       Register Src0,
                                                       Register Src1,
                                                       Register Src2) {
  assert(MI.getOpcode() == TargetOpcode::G_FPTRUNC);
  Register SrcReg = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(SrcReg) || MRI.getType(SrcReg) != LLT::scalar(32))
    return false;

  return isFPExtFromF16OrConst(MRI, Src0) && isFPExtFromF16OrConst(MRI, Src1) &&
         isFPExtFromF16OrConst(MRI, Src2);
}

void AMDGPUCombinerHelper::applyExpandPromotedF16FMed3(MachineInstr &MI,
                                                       Register Src0,
                                                       Register Src1,
                                                       Register Src2) {
  Builder.setInstrAndDebugLoc(MI);

  // We expect fptrunc (fpext x) to fold out, and to constant fold any constant
  // sources.
  Src0 = Builder.buildFPTrunc(LLT::scalar(16), Src0).getReg(0);
  Src1 = Builder.buildFPTrunc(LLT::scalar(16), Src1).getReg(0);
  Src2 = Builder.buildFPTrunc(LLT::scalar(16), Src2).getReg(0);

  LLT Ty = MRI.getType(Src0);
  auto A1 = Builder.buildFMinNumIEEE(Ty, Src0, Src1);
  auto B1 = Builder.buildFMaxNumIEEE(Ty, Src0, Src1);
  auto C1 = Builder.buildFMaxNumIEEE(Ty, A1, Src2);
  Builder.buildFMinNumIEEE(MI.getOperand(0), B1, C1);
  MI.eraseFromParent();
}
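
// Note: the four min/max operations above compute a median of three using the
// identity med3(x, y, z) == min(max(x, y), max(min(x, y), z)). Roughly (a
// hedged sketch in pseudocode, not exact MIR), the matched pattern
//
//   fptrunc(fmed3(fpext a, fpext b, fpext c))   ; a, b, c are f16 values
//                                               ; (or constants exactly
//                                               ; representable in f16)
//
// is replaced by that min/max network evaluated directly on the f16 values,
// so the intermediate f32 promotion and the trailing G_FPTRUNC fold away.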