Bug Summary

File: build/source/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
Warning: line 4659, column 16
1st function call argument is an uninitialized value
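
The flagged call at line 4659 falls outside the portion of the listing reproduced below. As background only, here is a minimal, hypothetical C++ reduction of the defect class this checker reports; none of these names (consume, example, Value, Cond) appear in CombinerHelper.cpp:

#include <cstdio>

static void consume(int Value) { std::printf("%d\n", Value); }

static void example(bool Cond) {
  int Value;      // never written on the 'else' path
  if (Cond)
    Value = 42;
  consume(Value); // 1st function call argument is an uninitialized value when Cond is false
}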

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CombinerHelper.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-17/lib/clang/17 -I lib/CodeGen/GlobalISel -I /build/source/llvm/lib/CodeGen/GlobalISel -I include -I /build/source/llvm/include -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
1//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9#include "llvm/ADT/SetVector.h"
10#include "llvm/ADT/SmallBitVector.h"
11#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
12#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
13#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
14#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
15#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
16#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
17#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18#include "llvm/CodeGen/GlobalISel/Utils.h"
19#include "llvm/CodeGen/LowLevelTypeUtils.h"
20#include "llvm/CodeGen/MachineBasicBlock.h"
21#include "llvm/CodeGen/MachineDominators.h"
22#include "llvm/CodeGen/MachineInstr.h"
23#include "llvm/CodeGen/MachineMemOperand.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/RegisterBankInfo.h"
26#include "llvm/CodeGen/TargetInstrInfo.h"
27#include "llvm/CodeGen/TargetLowering.h"
28#include "llvm/CodeGen/TargetOpcodes.h"
29#include "llvm/IR/DataLayout.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/Support/Casting.h"
32#include "llvm/Support/DivisionByConstantInfo.h"
33#include "llvm/Support/MathExtras.h"
34#include "llvm/Target/TargetMachine.h"
35#include <cmath>
36#include <optional>
37#include <tuple>
38
39#define DEBUG_TYPE "gi-combiner"
40
41using namespace llvm;
42using namespace MIPatternMatch;
43
44// Option to allow testing of the combiner while no targets know about indexed
45// addressing.
46static cl::opt<bool>
47 ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
48 cl::desc("Force all indexed operations to be "
49 "legal for the GlobalISel combiner"));
50
51CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
52 MachineIRBuilder &B, bool IsPreLegalize,
53 GISelKnownBits *KB, MachineDominatorTree *MDT,
54 const LegalizerInfo *LI)
55 : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
56 MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
57 RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
58 TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
59 (void)this->KB;
60}
61
62const TargetLowering &CombinerHelper::getTargetLowering() const {
63 return *Builder.getMF().getSubtarget().getTargetLowering();
64}
65
66/// \returns The little endian in-memory byte position of byte \p I in a
67/// \p ByteWidth bytes wide type.
68///
69/// E.g. Given a 4-byte type x, x[0] -> byte 0
70static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
71 assert(I < ByteWidth && "I must be in [0, ByteWidth)");
72 return I;
73}
74
75/// Determines the LogBase2 value for a non-null input value using the
76/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
77static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
78 auto &MRI = *MIB.getMRI();
79 LLT Ty = MRI.getType(V);
80 auto Ctlz = MIB.buildCTLZ(Ty, V);
81 auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
82 return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
83}
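
// Annotation, not part of the analyzed file: a plain-integer check of the
// identity the comment above relies on, LogBase2(V) = (EltBits - 1) - ctlz(V),
// which holds for the power-of-two values these combines feed in. The
// hypothetical helper uses the GCC/Clang builtin in place of the G_CTLZ that
// the MIR builder emits.
#include <cstdint>

static unsigned logBase2ViaCtlz(uint32_t V) {
  const unsigned EltBits = 32;
  return (EltBits - 1) - __builtin_clz(V); // e.g. V == 8: 31 - 28 == 3
}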
84
85/// \returns The big endian in-memory byte position of byte \p I in a
86/// \p ByteWidth bytes wide type.
87///
88/// E.g. Given a 4-byte type x, x[0] -> byte 3
89static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
90 assert(I < ByteWidth && "I must be in [0, ByteWidth)");
91 return ByteWidth - I - 1;
92}
93
94/// Given a map from byte offsets in memory to indices in a load/store,
95/// determine if that map corresponds to a little or big endian byte pattern.
96///
97/// \param MemOffset2Idx maps memory offsets to address offsets.
98/// \param LowestIdx is the lowest index in \p MemOffset2Idx.
99///
100/// \returns true if the map corresponds to a big endian byte pattern, false if
101/// it corresponds to a little endian byte pattern, and std::nullopt otherwise.
102///
103/// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
104/// are as follows:
105///
106/// AddrOffset Little endian Big endian
107/// 0 0 3
108/// 1 1 2
109/// 2 2 1
110/// 3 3 0
111static std::optional<bool>
112isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
113 int64_t LowestIdx) {
114 // Need at least two byte positions to decide on endianness.
115 unsigned Width = MemOffset2Idx.size();
116 if (Width < 2)
117 return std::nullopt;
118 bool BigEndian = true, LittleEndian = true;
119 for (unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) {
120 auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
121 if (MemOffsetAndIdx == MemOffset2Idx.end())
122 return std::nullopt;
123 const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
124 assert(Idx >= 0 && "Expected non-negative byte offset?");
125 LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
126 BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
127 if (!BigEndian && !LittleEndian)
128 return std::nullopt;
129 }
130
131 assert((BigEndian != LittleEndian) &&
132 "Pattern cannot be both big and little endian!");
133 return BigEndian;
134}
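
// Annotation, not part of the analyzed file: the endianness table above made
// concrete with a plain std::map in place of SmallDenseMap. The hypothetical
// helper isBigEndianSketch mirrors the loop in isBigEndian, inlining the
// littleEndianByteAt / bigEndianByteAt arithmetic.
#include <cstdint>
#include <map>
#include <optional>

static std::optional<bool>
isBigEndianSketch(const std::map<int64_t, int64_t> &MemOffset2Idx,
                  int64_t LowestIdx) {
  const unsigned Width = MemOffset2Idx.size();
  if (Width < 2)
    return std::nullopt;
  bool Big = true, Little = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto It = MemOffset2Idx.find(MemOffset);
    if (It == MemOffset2Idx.end())
      return std::nullopt;
    const int64_t Idx = It->second - LowestIdx;
    Little &= Idx == int64_t(MemOffset);          // littleEndianByteAt(Width, MemOffset)
    Big &= Idx == int64_t(Width - MemOffset - 1); // bigEndianByteAt(Width, MemOffset)
    if (!Big && !Little)
      return std::nullopt;
  }
  return Big;
}
// {{0,0},{1,1},{2,2},{3,3}} with LowestIdx 0 returns false (little endian);
// {{0,3},{1,2},{2,1},{3,0}} returns true (big endian); a mixed map returns nullopt.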
135
136bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; }
137
138bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
139 assert(LI && "Must have LegalizerInfo to query isLegal!");
140 return LI->getAction(Query).Action == LegalizeActions::Legal;
141}
142
143bool CombinerHelper::isLegalOrBeforeLegalizer(
144 const LegalityQuery &Query) const {
145 return isPreLegalize() || isLegal(Query);
146}
147
148bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
149 if (!Ty.isVector())
150 return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
151 // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
152 if (isPreLegalize())
153 return true;
154 LLT EltTy = Ty.getElementType();
155 return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
156 isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
157}
158
159void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
160 Register ToReg) const {
161 Observer.changingAllUsesOfReg(MRI, FromReg);
162
163 if (MRI.constrainRegAttrs(ToReg, FromReg))
164 MRI.replaceRegWith(FromReg, ToReg);
165 else
166 Builder.buildCopy(ToReg, FromReg);
167
168 Observer.finishedChangingAllUsesOfReg();
169}
170
171void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
172 MachineOperand &FromRegOp,
173 Register ToReg) const {
174 assert(FromRegOp.getParent() && "Expected an operand in an MI");
175 Observer.changingInstr(*FromRegOp.getParent());
176
177 FromRegOp.setReg(ToReg);
178
179 Observer.changedInstr(*FromRegOp.getParent());
180}
181
182void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
183 unsigned ToOpcode) const {
184 Observer.changingInstr(FromMI);
185
186 FromMI.setDesc(Builder.getTII().get(ToOpcode));
187
188 Observer.changedInstr(FromMI);
189}
190
191const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
192 return RBI->getRegBank(Reg, MRI, *TRI);
193}
194
195void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
196 if (RegBank)
197 MRI.setRegBank(Reg, *RegBank);
198}
199
200bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
201 if (matchCombineCopy(MI)) {
202 applyCombineCopy(MI);
203 return true;
204 }
205 return false;
206}
207bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
208 if (MI.getOpcode() != TargetOpcode::COPY)
209 return false;
210 Register DstReg = MI.getOperand(0).getReg();
211 Register SrcReg = MI.getOperand(1).getReg();
212 return canReplaceReg(DstReg, SrcReg, MRI);
213}
214void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
215 Register DstReg = MI.getOperand(0).getReg();
216 Register SrcReg = MI.getOperand(1).getReg();
217 MI.eraseFromParent();
218 replaceRegWith(MRI, DstReg, SrcReg);
219}
220
221bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
222 bool IsUndef = false;
223 SmallVector<Register, 4> Ops;
224 if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
225 applyCombineConcatVectors(MI, IsUndef, Ops);
226 return true;
227 }
228 return false;
229}
230
231bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
232 SmallVectorImpl<Register> &Ops) {
233 assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
234 "Invalid instruction");
235 IsUndef = true;
236 MachineInstr *Undef = nullptr;
237
238 // Walk over all the operands of concat vectors and check if they are
239 // build_vector themselves or undef.
240 // Then collect their operands in Ops.
241 for (const MachineOperand &MO : MI.uses()) {
242 Register Reg = MO.getReg();
243 MachineInstr *Def = MRI.getVRegDef(Reg);
244 assert(Def && "Operand not defined");
245 switch (Def->getOpcode()) {
246 case TargetOpcode::G_BUILD_VECTOR:
247 IsUndef = false;
248 // Remember the operands of the build_vector to fold
249 // them into the yet-to-build flattened concat vectors.
250 for (const MachineOperand &BuildVecMO : Def->uses())
251 Ops.push_back(BuildVecMO.getReg());
252 break;
253 case TargetOpcode::G_IMPLICIT_DEF: {
254 LLT OpType = MRI.getType(Reg);
255 // Keep one undef value for all the undef operands.
256 if (!Undef) {
257 Builder.setInsertPt(*MI.getParent(), MI);
258 Undef = Builder.buildUndef(OpType.getScalarType());
259 }
260 assert(MRI.getType(Undef->getOperand(0).getReg()) ==
261 OpType.getScalarType() &&
262 "All undefs should have the same type");
263 // Break the undef vector in as many scalar elements as needed
264 // for the flattening.
265 for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
266 EltIdx != EltEnd; ++EltIdx)
267 Ops.push_back(Undef->getOperand(0).getReg());
268 break;
269 }
270 default:
271 return false;
272 }
273 }
274 return true;
275}
276void CombinerHelper::applyCombineConcatVectors(
277 MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
278 // We determined that the concat_vectors can be flattened.
279 // Generate the flattened build_vector.
280 Register DstReg = MI.getOperand(0).getReg();
281 Builder.setInsertPt(*MI.getParent(), MI);
282 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
283
284 // Note: IsUndef is sort of redundant. We could have determined it by
285 // checking that all Ops are undef. Alternatively, we could have
286 // generated a build_vector of undefs and relied on another combine to
287 // clean that up. For now, given we already gather this information
288 // in tryCombineConcatVectors, just save compile time and issue the
289 // right thing.
290 if (IsUndef)
291 Builder.buildUndef(NewDstReg);
292 else
293 Builder.buildBuildVector(NewDstReg, Ops);
294 MI.eraseFromParent();
295 replaceRegWith(MRI, DstReg, NewDstReg);
296}
297
298bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
299 SmallVector<Register, 4> Ops;
300 if (matchCombineShuffleVector(MI, Ops)) {
301 applyCombineShuffleVector(MI, Ops);
302 return true;
303 }
304 return false;
305}
306
307bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
308 SmallVectorImpl<Register> &Ops) {
309 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
310 "Invalid instruction kind");
311 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
312 Register Src1 = MI.getOperand(1).getReg();
313 LLT SrcType = MRI.getType(Src1);
314 // As bizarre as it may look, shuffle vector can actually produce
315 // scalar! This is because at the IR level a <1 x ty> shuffle
316 // vector is perfectly valid.
317 unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
318 unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
319
320 // If the resulting vector is smaller than the size of the source
321 // vectors being concatenated, we won't be able to replace the
322 // shuffle vector into a concat_vectors.
323 //
324 // Note: We may still be able to produce a concat_vectors fed by
325 // extract_vector_elt and so on. It is less clear that would
326 // be better though, so don't bother for now.
327 //
328 // If the destination is a scalar, the size of the sources doesn't
329 // matter. We will lower the shuffle to a plain copy. This will
330 // work only if the source and destination have the same size. But
331 // that's covered by the next condition.
332 //
333 // TODO: If the sizes of the source and destination don't match
334 // we could still emit an extract vector element in that case.
335 if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
336 return false;
337
338 // Check that the shuffle mask can be broken evenly between the
339 // different sources.
340 if (DstNumElts % SrcNumElts != 0)
341 return false;
342
343 // Mask length is a multiple of the source vector length.
344 // Check if the shuffle is some kind of concatenation of the input
345 // vectors.
346 unsigned NumConcat = DstNumElts / SrcNumElts;
347 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
348 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
349 for (unsigned i = 0; i != DstNumElts; ++i) {
350 int Idx = Mask[i];
351 // Undef value.
352 if (Idx < 0)
353 continue;
354 // Ensure the indices in each SrcType sized piece are sequential and that
355 // the same source is used for the whole piece.
356 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
357 (ConcatSrcs[i / SrcNumElts] >= 0 &&
358 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
359 return false;
360 // Remember which source this index came from.
361 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
362 }
363
364 // The shuffle is concatenating multiple vectors together.
365 // Collect the different operands for that.
366 Register UndefReg;
367 Register Src2 = MI.getOperand(2).getReg();
368 for (auto Src : ConcatSrcs) {
369 if (Src < 0) {
370 if (!UndefReg) {
371 Builder.setInsertPt(*MI.getParent(), MI);
372 UndefReg = Builder.buildUndef(SrcType).getReg(0);
373 }
374 Ops.push_back(UndefReg);
375 } else if (Src == 0)
376 Ops.push_back(Src1);
377 else
378 Ops.push_back(Src2);
379 }
380 return true;
381}
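
// Annotation, not part of the analyzed file: a scalar model of the mask walk
// above. The hypothetical helper concatSources returns, for each
// SrcNumElts-wide piece of the mask, which source that piece copies, or
// nullopt if the shuffle is not a clean concatenation. E.g. with
// SrcNumElts == 2, Mask == {0, 1, 2, 3} gives {0, 1} (concat(Src1, Src2)),
// Mask == {2, 3, 0, 1} gives {1, 0}, and Mask == {0, 2, 1, 3} gives nullopt.
#include <optional>
#include <vector>

static std::optional<std::vector<int>>
concatSources(const std::vector<int> &Mask, int SrcNumElts) {
  const int DstNumElts = Mask.size();
  if (DstNumElts % SrcNumElts != 0)
    return std::nullopt;
  std::vector<int> ConcatSrcs(DstNumElts / SrcNumElts, -1);
  for (int i = 0; i != DstNumElts; ++i) {
    const int Idx = Mask[i];
    if (Idx < 0)
      continue; // undef lane: any source works for this position
    if (Idx % SrcNumElts != i % SrcNumElts ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != Idx / SrcNumElts))
      return std::nullopt;
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }
  return ConcatSrcs;
}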
382
383void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
384 const ArrayRef<Register> Ops) {
385 Register DstReg = MI.getOperand(0).getReg();
386 Builder.setInsertPt(*MI.getParent(), MI);
387 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
388
389 if (Ops.size() == 1)
390 Builder.buildCopy(NewDstReg, Ops[0]);
391 else
392 Builder.buildMergeLikeInstr(NewDstReg, Ops);
393
394 MI.eraseFromParent();
395 replaceRegWith(MRI, DstReg, NewDstReg);
396}
397
398namespace {
399
400/// Select a preference between two uses. CurrentUse is the current preference
401/// while *ForCandidate are the attributes of the candidate under consideration.
402PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,
403 PreferredTuple &CurrentUse,
404 const LLT TyForCandidate,
405 unsigned OpcodeForCandidate,
406 MachineInstr *MIForCandidate) {
407 if (!CurrentUse.Ty.isValid()) {
408 if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
409 CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
410 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
411 return CurrentUse;
412 }
413
414 // We permit the extend to hoist through basic blocks but this is only
415 // sensible if the target has extending loads. If you end up lowering back
416 // into a load and extend during the legalizer then the end result is
417 // hoisting the extend up to the load.
418
419 // Prefer defined extensions to undefined extensions as these are more
420 // likely to reduce the number of instructions.
421 if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
422 CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
423 return CurrentUse;
424 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
425 OpcodeForCandidate != TargetOpcode::G_ANYEXT)
426 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
427
428 // Prefer sign extensions to zero extensions as sign-extensions tend to be
429 // more expensive. Don't do this if the load is already a zero-extend load
430 // though, otherwise we'll rewrite a zero-extend load into a sign-extend
431 // later.
432 if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
433 if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
434 OpcodeForCandidate == TargetOpcode::G_ZEXT)
435 return CurrentUse;
436 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
437 OpcodeForCandidate == TargetOpcode::G_SEXT)
438 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
439 }
440
441 // This is potentially target specific. We've chosen the largest type
442 // because G_TRUNC is usually free. One potential catch with this is that
443 // some targets have a reduced number of larger registers than smaller
444 // registers and this choice potentially increases the live-range for the
445 // larger value.
446 if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
447 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
448 }
449 return CurrentUse;
450}
451
452/// Find a suitable place to insert some instructions and insert them. This
453/// function accounts for special cases like inserting before a PHI node.
454/// The current strategy for inserting before PHI's is to duplicate the
455/// instructions for each predecessor. However, while that's ok for G_TRUNC
456/// on most targets since it generally requires no code, other targets/cases may
457/// want to try harder to find a dominating block.
458static void InsertInsnsWithoutSideEffectsBeforeUse(
459 MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
460 std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
461 MachineOperand &UseMO)>
462 Inserter) {
463 MachineInstr &UseMI = *UseMO.getParent();
464
465 MachineBasicBlock *InsertBB = UseMI.getParent();
466
467 // If the use is a PHI then we want the predecessor block instead.
468 if (UseMI.isPHI()) {
469 MachineOperand *PredBB = std::next(&UseMO);
470 InsertBB = PredBB->getMBB();
471 }
472
473 // If the block is the same block as the def then we want to insert just after
474 // the def instead of at the start of the block.
475 if (InsertBB == DefMI.getParent()) {
476 MachineBasicBlock::iterator InsertPt = &DefMI;
477 Inserter(InsertBB, std::next(InsertPt), UseMO);
478 return;
479 }
480
481 // Otherwise we want the start of the BB
482 Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
483}
484} // end anonymous namespace
485
486bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
487 PreferredTuple Preferred;
488 if (matchCombineExtendingLoads(MI, Preferred)) {
489 applyCombineExtendingLoads(MI, Preferred);
490 return true;
491 }
492 return false;
493}
494
495static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) {
496 unsigned CandidateLoadOpc;
497 switch (ExtOpc) {
498 case TargetOpcode::G_ANYEXT:
499 CandidateLoadOpc = TargetOpcode::G_LOAD;
500 break;
501 case TargetOpcode::G_SEXT:
502 CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
503 break;
504 case TargetOpcode::G_ZEXT:
505 CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
506 break;
507 default:
508 llvm_unreachable("Unexpected extend opc");
509 }
510 return CandidateLoadOpc;
511}
512
513bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
514 PreferredTuple &Preferred) {
515 // We match the loads and follow the uses to the extend instead of matching
516 // the extends and following the def to the load. This is because the load
517 // must remain in the same position for correctness (unless we also add code
518 // to find a safe place to sink it) whereas the extend is freely movable.
519 // It also prevents us from duplicating the load for the volatile case or just
520 // for performance.
521 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
522 if (!LoadMI)
523 return false;
524
525 Register LoadReg = LoadMI->getDstReg();
526
527 LLT LoadValueTy = MRI.getType(LoadReg);
528 if (!LoadValueTy.isScalar())
529 return false;
530
531 // Most architectures are going to legalize <s8 loads into at least a 1 byte
532 // load, and the MMOs can only describe memory accesses in multiples of bytes.
533 // If we try to perform extload combining on those, we can end up with
534 // %a(s8) = extload %ptr (load 1 byte from %ptr)
535 // ... which is an illegal extload instruction.
536 if (LoadValueTy.getSizeInBits() < 8)
537 return false;
538
539 // For non power-of-2 types, they will very likely be legalized into multiple
540 // loads. Don't bother trying to match them into extending loads.
541 if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
542 return false;
543
544 // Find the preferred type aside from the any-extends (unless it's the only
545 // one) and non-extending ops. We'll emit an extending load to that type
546 // and emit a variant of (extend (trunc X)) for the others according to the
547 // relative type sizes. At the same time, pick an extend to use based on the
548 // extend involved in the chosen type.
549 unsigned PreferredOpcode =
550 isa<GLoad>(&MI)
551 ? TargetOpcode::G_ANYEXT
552 : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
553 Preferred = {LLT(), PreferredOpcode, nullptr};
554 for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
555 if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
556 UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
557 (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
558 const auto &MMO = LoadMI->getMMO();
559 // For atomics, only form anyextending loads.
560 if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
561 continue;
562 // Check for legality.
563 if (!isPreLegalize()) {
564 LegalityQuery::MemDesc MMDesc(MMO);
565 unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode());
566 LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
567 LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
568 if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
569 .Action != LegalizeActions::Legal)
570 continue;
571 }
572 Preferred = ChoosePreferredUse(MI, Preferred,
573 MRI.getType(UseMI.getOperand(0).getReg()),
574 UseMI.getOpcode(), &UseMI);
575 }
576 }
577
578 // There were no extends
579 if (!Preferred.MI)
580 return false;
581 // It should be impossible to choose an extend without selecting a different
582 // type since by definition the result of an extend is larger.
583 assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
584
585 LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
586 return true;
587}
588
589void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
590 PreferredTuple &Preferred) {
591 // Rewrite the load to the chosen extending load.
592 Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
593
594 // Inserter to insert a truncate back to the original type at a given point
595 // with some basic CSE to limit truncate duplication to one per BB.
596 DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
597 auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
598 MachineBasicBlock::iterator InsertBefore,
599 MachineOperand &UseMO) {
600 MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
601 if (PreviouslyEmitted) {
602 Observer.changingInstr(*UseMO.getParent());
603 UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
604 Observer.changedInstr(*UseMO.getParent());
605 return;
606 }
607
608 Builder.setInsertPt(*InsertIntoBB, InsertBefore);
609 Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
610 MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
611 EmittedInsns[InsertIntoBB] = NewMI;
612 replaceRegOpWith(MRI, UseMO, NewDstReg);
613 };
614
615 Observer.changingInstr(MI);
616 unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode);
617 MI.setDesc(Builder.getTII().get(LoadOpc));
618
619 // Rewrite all the uses to fix up the types.
620 auto &LoadValue = MI.getOperand(0);
621 SmallVector<MachineOperand *, 4> Uses;
622 for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
623 Uses.push_back(&UseMO);
624
625 for (auto *UseMO : Uses) {
626 MachineInstr *UseMI = UseMO->getParent();
627
628 // If the extend is compatible with the preferred extend then we should fix
629 // up the type and extend so that it uses the preferred use.
630 if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
631 UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
632 Register UseDstReg = UseMI->getOperand(0).getReg();
633 MachineOperand &UseSrcMO = UseMI->getOperand(1);
634 const LLT UseDstTy = MRI.getType(UseDstReg);
635 if (UseDstReg != ChosenDstReg) {
636 if (Preferred.Ty == UseDstTy) {
637 // If the use has the same type as the preferred use, then merge
638 // the vregs and erase the extend. For example:
639 // %1:_(s8) = G_LOAD ...
640 // %2:_(s32) = G_SEXT %1(s8)
641 // %3:_(s32) = G_ANYEXT %1(s8)
642 // ... = ... %3(s32)
643 // rewrites to:
644 // %2:_(s32) = G_SEXTLOAD ...
645 // ... = ... %2(s32)
646 replaceRegWith(MRI, UseDstReg, ChosenDstReg);
647 Observer.erasingInstr(*UseMO->getParent());
648 UseMO->getParent()->eraseFromParent();
649 } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
650 // If the preferred size is smaller, then keep the extend but extend
651 // from the result of the extending load. For example:
652 // %1:_(s8) = G_LOAD ...
653 // %2:_(s32) = G_SEXT %1(s8)
654 // %3:_(s64) = G_ANYEXT %1(s8)
655 // ... = ... %3(s64)
656 /// rewrites to:
657 // %2:_(s32) = G_SEXTLOAD ...
658 // %3:_(s64) = G_ANYEXT %2:_(s32)
659 // ... = ... %3(s64)
660 replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
661 } else {
662 // If the preferred size is large, then insert a truncate. For
663 // example:
664 // %1:_(s8) = G_LOAD ...
665 // %2:_(s64) = G_SEXT %1(s8)
666 // %3:_(s32) = G_ZEXT %1(s8)
667 // ... = ... %3(s32)
668 /// rewrites to:
669 // %2:_(s64) = G_SEXTLOAD ...
670 // %4:_(s8) = G_TRUNC %2:_(s32)
671 // %3:_(s64) = G_ZEXT %2:_(s8)
672 // ... = ... %3(s64)
673 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
674 InsertTruncAt);
675 }
676 continue;
677 }
678 // The use is (one of) the uses of the preferred use we chose earlier.
679 // We're going to update the load to def this value later so just erase
680 // the old extend.
681 Observer.erasingInstr(*UseMO->getParent());
682 UseMO->getParent()->eraseFromParent();
683 continue;
684 }
685
686 // The use isn't an extend. Truncate back to the type we originally loaded.
687 // This is free on many targets.
688 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
689 }
690
691 MI.getOperand(0).setReg(ChosenDstReg);
692 Observer.changedInstr(MI);
693}
694
695bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
696 BuildFnTy &MatchInfo) {
697 assert(MI.getOpcode() == TargetOpcode::G_AND);
698
699 // If we have the following code:
700 // %mask = G_CONSTANT 255
701 // %ld = G_LOAD %ptr, (load s16)
702 // %and = G_AND %ld, %mask
703 //
704 // Try to fold it into
705 // %ld = G_ZEXTLOAD %ptr, (load s8)
706
707 Register Dst = MI.getOperand(0).getReg();
708 if (MRI.getType(Dst).isVector())
709 return false;
710
711 auto MaybeMask =
712 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
713 if (!MaybeMask)
714 return false;
715
716 APInt MaskVal = MaybeMask->Value;
717
718 if (!MaskVal.isMask())
719 return false;
720
721 Register SrcReg = MI.getOperand(1).getReg();
722 // Don't use getOpcodeDef() here since intermediate instructions may have
723 // multiple users.
724 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
725 if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
726 return false;
727
728 Register LoadReg = LoadMI->getDstReg();
729 LLT RegTy = MRI.getType(LoadReg);
730 Register PtrReg = LoadMI->getPointerReg();
731 unsigned RegSize = RegTy.getSizeInBits();
732 uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
733 unsigned MaskSizeBits = MaskVal.countr_one();
734
735 // The mask may not be larger than the in-memory type, as it might cover sign
736 // extended bits
737 if (MaskSizeBits > LoadSizeBits)
738 return false;
739
740 // If the mask covers the whole destination register, there's nothing to
741 // extend
742 if (MaskSizeBits >= RegSize)
743 return false;
744
745 // Most targets cannot deal with loads of size < 8 and need to re-legalize to
746 // at least byte loads. Avoid creating such loads here
747 if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
748 return false;
749
750 const MachineMemOperand &MMO = LoadMI->getMMO();
751 LegalityQuery::MemDesc MemDesc(MMO);
752
753 // Don't modify the memory access size if this is atomic/volatile, but we can
754 // still adjust the opcode to indicate the high bit behavior.
755 if (LoadMI->isSimple())
756 MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
757 else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
758 return false;
759
760 // TODO: Could check if it's legal with the reduced or original memory size.
761 if (!isLegalOrBeforeLegalizer(
762 {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
763 return false;
764
765 MatchInfo = [=](MachineIRBuilder &B) {
766 B.setInstrAndDebugLoc(*LoadMI);
767 auto &MF = B.getMF();
768 auto PtrInfo = MMO.getPointerInfo();
769 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
770 B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
771 LoadMI->eraseFromParent();
772 };
773 return true;
774}
775
776bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
777 const MachineInstr &UseMI) {
778 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
779 "shouldn't consider debug uses");
780 assert(DefMI.getParent() == UseMI.getParent());
781 if (&DefMI == &UseMI)
782 return true;
783 const MachineBasicBlock &MBB = *DefMI.getParent();
784 auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
785 return &MI == &DefMI || &MI == &UseMI;
786 });
787 if (DefOrUse == MBB.end())
788 llvm_unreachable("Block must contain both DefMI and UseMI!");
789 return &*DefOrUse == &DefMI;
790}
791
792bool CombinerHelper::dominates(const MachineInstr &DefMI,
793 const MachineInstr &UseMI) {
794 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
795 "shouldn't consider debug uses");
796 if (MDT)
797 return MDT->dominates(&DefMI, &UseMI);
798 else if (DefMI.getParent() != UseMI.getParent())
799 return false;
800
801 return isPredecessor(DefMI, UseMI);
802}
803
804bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
805 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
806 Register SrcReg = MI.getOperand(1).getReg();
807 Register LoadUser = SrcReg;
808
809 if (MRI.getType(SrcReg).isVector())
810 return false;
811
812 Register TruncSrc;
813 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
814 LoadUser = TruncSrc;
815
816 uint64_t SizeInBits = MI.getOperand(2).getImm();
817 // If the source is a G_SEXTLOAD from the same bit width, then we don't
818 // need any extend at all, just a truncate.
819 if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
820 // If truncating more than the original extended value, abort.
821 auto LoadSizeBits = LoadMI->getMemSizeInBits();
822 if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
823 return false;
824 if (LoadSizeBits == SizeInBits)
825 return true;
826 }
827 return false;
828}
829
830void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
831 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
832 Builder.setInstrAndDebugLoc(MI);
833 Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
834 MI.eraseFromParent();
835}
836
837bool CombinerHelper::matchSextInRegOfLoad(
838 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
839 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
840
841 Register DstReg = MI.getOperand(0).getReg();
842 LLT RegTy = MRI.getType(DstReg);
843
844 // Only supports scalars for now.
845 if (RegTy.isVector())
846 return false;
847
848 Register SrcReg = MI.getOperand(1).getReg();
849 auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
850 if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
851 return false;
852
853 uint64_t MemBits = LoadDef->getMemSizeInBits();
854
855 // If the sign extend extends from a narrower width than the load's width,
856 // then we can narrow the load width when we combine to a G_SEXTLOAD.
857 // Avoid widening the load at all.
858 unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);
859
860 // Don't generate G_SEXTLOADs with a < 1 byte width.
861 if (NewSizeBits < 8)
862 return false;
863 // Don't bother creating a non-power-2 sextload, it will likely be broken up
864 // anyway for most targets.
865 if (!isPowerOf2_32(NewSizeBits))
866 return false;
867
868 const MachineMemOperand &MMO = LoadDef->getMMO();
869 LegalityQuery::MemDesc MMDesc(MMO);
870
871 // Don't modify the memory access size if this is atomic/volatile, but we can
872 // still adjust the opcode to indicate the high bit behavior.
873 if (LoadDef->isSimple())
874 MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
875 else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
876 return false;
877
878 // TODO: Could check if it's legal with the reduced or original memory size.
879 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
880 {MRI.getType(LoadDef->getDstReg()),
881 MRI.getType(LoadDef->getPointerReg())},
882 {MMDesc}}))
883 return false;
884
885 MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
886 return true;
887}
888
889void CombinerHelper::applySextInRegOfLoad(
890 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
891 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
892 Register LoadReg;
893 unsigned ScalarSizeBits;
894 std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
895 GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
896
897 // If we have the following:
898 // %ld = G_LOAD %ptr, (load 2)
899 // %ext = G_SEXT_INREG %ld, 8
900 // ==>
901 // %ld = G_SEXTLOAD %ptr (load 1)
902
903 auto &MMO = LoadDef->getMMO();
904 Builder.setInstrAndDebugLoc(*LoadDef);
905 auto &MF = Builder.getMF();
906 auto PtrInfo = MMO.getPointerInfo();
907 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
908 Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
909 LoadDef->getPointerReg(), *NewMMO);
910 MI.eraseFromParent();
911}
912
913bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
914 Register &Base, Register &Offset) {
915 auto &MF = *MI.getParent()->getParent();
916 const auto &TLI = *MF.getSubtarget().getTargetLowering();
917
918#ifndef NDEBUG
919 unsigned Opcode = MI.getOpcode();
920 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
921 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
922#endif
923
924 Base = MI.getOperand(1).getReg();
925 MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
926 if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
927 return false;
928
929 LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
930 // FIXME: The following use traversal needs a bail out for pathological cases.
931 for (auto &Use : MRI.use_nodbg_instructions(Base)) {
932 if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
933 continue;
934
935 Offset = Use.getOperand(2).getReg();
936 if (!ForceLegalIndexing &&
937 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
938 LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "
939 << Use);
940 continue;
941 }
942
943 // Make sure the offset calculation is before the potentially indexed op.
944 // FIXME: we really care about dependency here. The offset calculation might
945 // be movable.
946 MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
947 if (!OffsetDef || !dominates(*OffsetDef, MI)) {
948 LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "
949 << Use);
950 continue;
951 }
952
953 // FIXME: check whether all uses of Base are load/store with foldable
954 // addressing modes. If so, using the normal addr-modes is better than
955 // forming an indexed one.
956
957 bool MemOpDominatesAddrUses = true;
958 for (auto &PtrAddUse :
959 MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
960 if (!dominates(MI, PtrAddUse)) {
961 MemOpDominatesAddrUses = false;
962 break;
963 }
964 }
965
966 if (!MemOpDominatesAddrUses) {
967 LLVM_DEBUG(
968 dbgs() << " Ignoring candidate as memop does not dominate uses: "
969 << Use);
970 continue;
971 }
972
973 LLVM_DEBUG(dbgs() << " Found match: " << Use);
974 Addr = Use.getOperand(0).getReg();
975 return true;
976 }
977
978 return false;
979}
980
981bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
982 Register &Base, Register &Offset) {
983 auto &MF = *MI.getParent()->getParent();
984 const auto &TLI = *MF.getSubtarget().getTargetLowering();
985
986#ifndef NDEBUG
987 unsigned Opcode = MI.getOpcode();
988 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
989 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
990#endif
991
992 Addr = MI.getOperand(1).getReg();
993 MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
994 if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
995 return false;
996
997 Base = AddrDef->getOperand(1).getReg();
998 Offset = AddrDef->getOperand(2).getReg();
999
1000 LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
1001
1002 if (!ForceLegalIndexing &&
1003 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
1004 LLVM_DEBUG(dbgs() << " Skipping, not legal for target");
1005 return false;
1006 }
1007
1008 MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
1009 if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
1010 LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");
1011 return false;
1012 }
1013
1014 if (MI.getOpcode() == TargetOpcode::G_STORE) {
1015 // Would require a copy.
1016 if (Base == MI.getOperand(0).getReg()) {
1017 LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");
1018 return false;
1019 }
1020
1021 // We're expecting one use of Addr in MI, but it could also be the
1022 // value stored, which isn't actually dominated by the instruction.
1023 if (MI.getOperand(0).getReg() == Addr) {
1024 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");
1025 return false;
1026 }
1027 }
1028
1029 // FIXME: check whether all uses of the base pointer are constant PtrAdds.
1030 // That might allow us to end base's liveness here by adjusting the constant.
1031
1032 for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
1033 if (!dominates(MI, UseMI)) {
1034 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
1035 return false;
1036 }
1037 }
1038
1039 return true;
1040}
1041
1042bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
1043 IndexedLoadStoreMatchInfo MatchInfo;
1044 if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
1045 applyCombineIndexedLoadStore(MI, MatchInfo);
1046 return true;
1047 }
1048 return false;
1049}
1050
1051bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1052 unsigned Opcode = MI.getOpcode();
1053 if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
1054 Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
1055 return false;
1056
1057 // For now, no targets actually support these opcodes so don't waste time
1058 // running these unless we're forced to for testing.
1059 if (!ForceLegalIndexing)
1060 return false;
1061
1062 MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
1063 MatchInfo.Offset);
1064 if (!MatchInfo.IsPre &&
1065 !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
1066 MatchInfo.Offset))
1067 return false;
1068
1069 return true;
1070}
1071
1072void CombinerHelper::applyCombineIndexedLoadStore(
1073 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1074 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
1075 MachineIRBuilder MIRBuilder(MI);
1076 unsigned Opcode = MI.getOpcode();
1077 bool IsStore = Opcode == TargetOpcode::G_STORE;
1078 unsigned NewOpcode;
1079 switch (Opcode) {
1080 case TargetOpcode::G_LOAD:
1081 NewOpcode = TargetOpcode::G_INDEXED_LOAD;
1082 break;
1083 case TargetOpcode::G_SEXTLOAD:
1084 NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
1085 break;
1086 case TargetOpcode::G_ZEXTLOAD:
1087 NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
1088 break;
1089 case TargetOpcode::G_STORE:
1090 NewOpcode = TargetOpcode::G_INDEXED_STORE;
1091 break;
1092 default:
1093 llvm_unreachable("Unknown load/store opcode");
1094 }
1095
1096 auto MIB = MIRBuilder.buildInstr(NewOpcode);
1097 if (IsStore) {
1098 MIB.addDef(MatchInfo.Addr);
1099 MIB.addUse(MI.getOperand(0).getReg());
1100 } else {
1101 MIB.addDef(MI.getOperand(0).getReg());
1102 MIB.addDef(MatchInfo.Addr);
1103 }
1104
1105 MIB.addUse(MatchInfo.Base);
1106 MIB.addUse(MatchInfo.Offset);
1107 MIB.addImm(MatchInfo.IsPre);
1108 MI.eraseFromParent();
1109 AddrDef.eraseFromParent();
1110
1111 LLVM_DEBUG(dbgs() << " Combinined to indexed operation");
1112}
1113
1114bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
1115 MachineInstr *&OtherMI) {
1116 unsigned Opcode = MI.getOpcode();
1117 bool IsDiv, IsSigned;
1118
1119 switch (Opcode) {
1120 default:
1121 llvm_unreachable("Unexpected opcode!");
1122 case TargetOpcode::G_SDIV:
1123 case TargetOpcode::G_UDIV: {
1124 IsDiv = true;
1125 IsSigned = Opcode == TargetOpcode::G_SDIV;
1126 break;
1127 }
1128 case TargetOpcode::G_SREM:
1129 case TargetOpcode::G_UREM: {
1130 IsDiv = false;
1131 IsSigned = Opcode == TargetOpcode::G_SREM;
1132 break;
1133 }
1134 }
1135
1136 Register Src1 = MI.getOperand(1).getReg();
1137 unsigned DivOpcode, RemOpcode, DivremOpcode;
1138 if (IsSigned) {
1139 DivOpcode = TargetOpcode::G_SDIV;
1140 RemOpcode = TargetOpcode::G_SREM;
1141 DivremOpcode = TargetOpcode::G_SDIVREM;
1142 } else {
1143 DivOpcode = TargetOpcode::G_UDIV;
1144 RemOpcode = TargetOpcode::G_UREM;
1145 DivremOpcode = TargetOpcode::G_UDIVREM;
1146 }
1147
1148 if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
1149 return false;
1150
1151 // Combine:
1152 // %div:_ = G_[SU]DIV %src1:_, %src2:_
1153 // %rem:_ = G_[SU]REM %src1:_, %src2:_
1154 // into:
1155 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1156
1157 // Combine:
1158 // %rem:_ = G_[SU]REM %src1:_, %src2:_
1159 // %div:_ = G_[SU]DIV %src1:_, %src2:_
1160 // into:
1161 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1162
1163 for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
1164 if (MI.getParent() == UseMI.getParent() &&
1165 ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1166 (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
1167 matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
1168 matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
1169 OtherMI = &UseMI;
1170 return true;
1171 }
1172 }
1173
1174 return false;
1175}
1176
1177void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
1178 MachineInstr *&OtherMI) {
1179 unsigned Opcode = MI.getOpcode();
1180 assert(OtherMI && "OtherMI shouldn't be empty.");
1181
1182 Register DestDivReg, DestRemReg;
1183 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1184 DestDivReg = MI.getOperand(0).getReg();
1185 DestRemReg = OtherMI->getOperand(0).getReg();
1186 } else {
1187 DestDivReg = OtherMI->getOperand(0).getReg();
1188 DestRemReg = MI.getOperand(0).getReg();
1189 }
1190
1191 bool IsSigned =
1192 Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1193
1194 // Check which instruction is first in the block so we don't break def-use
1195 // deps by "moving" the instruction incorrectly.
1196 if (dominates(MI, *OtherMI))
1197 Builder.setInstrAndDebugLoc(MI);
1198 else
1199 Builder.setInstrAndDebugLoc(*OtherMI);
1200
1201 Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1202 : TargetOpcode::G_UDIVREM,
1203 {DestDivReg, DestRemReg},
1204 {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
1205 MI.eraseFromParent();
1206 OtherMI->eraseFromParent();
1207}
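
As a rough illustration in plain C++ (not the GlobalISel API): the combine merges a division and a remainder over identical operands into one operation that produces both results, much like std::div does for scalars.

#include <cassert>
#include <cstdlib>

int main() {
  int Src1 = 29, Src2 = 4;
  // Before: two separate operations over the same operands.
  int Div = Src1 / Src2;   // %div = G_SDIV %src1, %src2
  int Rem = Src1 % Src2;   // %rem = G_SREM %src1, %src2
  // After: one combined operation, analogous to G_SDIVREM. std::div is the
  // closest standard-library equivalent.
  std::div_t Both = std::div(Src1, Src2);
  assert(Both.quot == Div && Both.rem == Rem);
  return 0;
}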
1208
1209bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
1210 MachineInstr *&BrCond) {
1211 assert(MI.getOpcode() == TargetOpcode::G_BR);
1212
1213 // Try to match the following:
1214 // bb1:
1215 // G_BRCOND %c1, %bb2
1216 // G_BR %bb3
1217 // bb2:
1218 // ...
1219 // bb3:
1220
1221 // The above pattern does not have a fall through to the successor bb2, always
1222 // resulting in a branch no matter which path is taken. Here we try to find
1223 // and replace that pattern with a conditional branch to bb3 and otherwise
1224 // fallthrough to bb2. This is generally better for branch predictors.
1225
1226 MachineBasicBlock *MBB = MI.getParent();
1227 MachineBasicBlock::iterator BrIt(MI);
1228 if (BrIt == MBB->begin())
1229 return false;
1230 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1231
1232 BrCond = &*std::prev(BrIt);
1233 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1234 return false;
1235
1236 // Check that the next block is the conditional branch target. Also make sure
1237 // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1238 MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1239 return BrCondTarget != MI.getOperand(0).getMBB() &&
1240 MBB->isLayoutSuccessor(BrCondTarget);
1241}
1242
1243void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
1244 MachineInstr *&BrCond) {
1245 MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1246 Builder.setInstrAndDebugLoc(*BrCond);
1247 LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1248 // FIXME: Does int/fp matter for this? If so, we might need to restrict
1249 // this to i1 only since we might not know for sure what kind of
1250 // compare generated the condition value.
1251 auto True = Builder.buildConstant(
1252 Ty, getICmpTrueVal(getTargetLowering(), false, false));
1253 auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1254
1255 auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1256 Observer.changingInstr(MI);
1257 MI.getOperand(0).setMBB(FallthroughBB);
1258 Observer.changedInstr(MI);
1259
1260 // Change the conditional branch to use the inverted condition and
1261 // new target block.
1262 Observer.changingInstr(*BrCond);
1263 BrCond->getOperand(0).setReg(Xor.getReg(0));
1264 BrCond->getOperand(1).setMBB(BrTarget);
1265 Observer.changedInstr(*BrCond);
1266}
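
A standalone sketch of the rewrite in plain C++ (my own example, assuming a 0/1 condition value as the FIXME above discusses): branching on %c to bb2 and unconditionally jumping to bb3 is equivalent to branching on XOR(%c, 1) to bb3 and falling through to bb2.

#include <cassert>
#include <cstdint>

// Returns which "block" is reached for the original and rewritten forms.
static int originalForm(uint8_t C) { return C ? 2 : 3; } // G_BRCOND %c, bb2; G_BR bb3
static int invertedForm(uint8_t C) {
  uint8_t NotC = C ^ 1;                                  // XOR with the "true" value
  return NotC ? 3 : 2;                                   // G_BRCOND %notc, bb3; fall through to bb2
}

int main() {
  for (uint8_t C : {uint8_t(0), uint8_t(1)})
    assert(originalForm(C) == invertedForm(C));
  return 0;
}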
1267
1268static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1269 if (Ty.isVector())
1270 return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1271 Ty.getNumElements());
1272 return IntegerType::get(C, Ty.getSizeInBits());
1273}
1274
1275bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
1276 MachineIRBuilder HelperBuilder(MI);
1277 GISelObserverWrapper DummyObserver;
1278 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1279 return Helper.lowerMemcpyInline(MI) ==
1280 LegalizerHelper::LegalizeResult::Legalized;
1281}
1282
1283bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1284 MachineIRBuilder HelperBuilder(MI);
1285 GISelObserverWrapper DummyObserver;
1286 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1287 return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1288 LegalizerHelper::LegalizeResult::Legalized;
1289}
1290
1291static std::optional<APFloat>
1292constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op,
1293 const MachineRegisterInfo &MRI) {
1294 const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
1295 if (!MaybeCst)
1296 return std::nullopt;
1297
1298 APFloat V = MaybeCst->getValueAPF();
1299 switch (Opcode) {
1300 default:
1301 llvm_unreachable("Unexpected opcode!");
1302 case TargetOpcode::G_FNEG: {
1303 V.changeSign();
1304 return V;
1305 }
1306 case TargetOpcode::G_FABS: {
1307 V.clearSign();
1308 return V;
1309 }
1310 case TargetOpcode::G_FPTRUNC:
1311 break;
1312 case TargetOpcode::G_FSQRT: {
1313 bool Unused;
1314 V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1315 V = APFloat(sqrt(V.convertToDouble()));
1316 break;
1317 }
1318 case TargetOpcode::G_FLOG2: {
1319 bool Unused;
1320 V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1321 V = APFloat(log2(V.convertToDouble()));
1322 break;
1323 }
1324 }
1325 // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
1326 // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
1327 // and `G_FLOG2` reach here.
1328 bool Unused;
1329 V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
1330 return V;
1331}
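
For intuition, a plain-C++ sketch (standard <cmath>, not the APFloat API) of what the folding above computes for a few opcodes, assuming double precision:

#include <cassert>
#include <cmath>

int main() {
  double V = 2.0;
  // G_FNEG / G_FABS flip or clear the sign of the constant.
  assert(-(-V) == V && std::fabs(-V) == V);
  // G_FSQRT and G_FLOG2 are folded by evaluating in double precision and
  // converting back to the destination's format afterwards.
  assert(std::sqrt(4.0) == 2.0);
  assert(std::fabs(std::log2(8.0) - 3.0) < 1e-12);
  return 0;
}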
1332
1333bool CombinerHelper::matchCombineConstantFoldFpUnary(
1334 MachineInstr &MI, std::optional<APFloat> &Cst) {
1335 Register DstReg = MI.getOperand(0).getReg();
1336 Register SrcReg = MI.getOperand(1).getReg();
1337 LLT DstTy = MRI.getType(DstReg);
1338 Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1339 return Cst.has_value();
1340}
1341
1342void CombinerHelper::applyCombineConstantFoldFpUnary(
1343 MachineInstr &MI, std::optional<APFloat> &Cst) {
1344 assert(Cst && "Optional is unexpectedly empty!");
1345 Builder.setInstrAndDebugLoc(MI);
1346 MachineFunction &MF = Builder.getMF();
1347 auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1348 Register DstReg = MI.getOperand(0).getReg();
1349 Builder.buildFConstant(DstReg, *FPVal);
1350 MI.eraseFromParent();
1351}
1352
1353bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1354 PtrAddChain &MatchInfo) {
1355 // We're trying to match the following pattern:
1356 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1357 // %root = G_PTR_ADD %t1, G_CONSTANT imm2
1358 // -->
1359 // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1360
1361 if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1362 return false;
1363
1364 Register Add2 = MI.getOperand(1).getReg();
1365 Register Imm1 = MI.getOperand(2).getReg();
1366 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1367 if (!MaybeImmVal)
1368 return false;
1369
1370 MachineInstr *Add2Def = MRI.getVRegDef(Add2);
1371 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1372 return false;
1373
1374 Register Base = Add2Def->getOperand(1).getReg();
1375 Register Imm2 = Add2Def->getOperand(2).getReg();
1376 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1377 if (!MaybeImm2Val)
1378 return false;
1379
1380 // Check if the new combined immediate forms an illegal addressing mode.
1381 // Do not combine if it was legal before but would get illegal.
1382 // To do so, we need to find a load/store user of the pointer to get
1383 // the access type.
1384 Type *AccessTy = nullptr;
1385 auto &MF = *MI.getMF();
1386 for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1387 if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1388 AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1389 MF.getFunction().getContext());
1390 break;
1391 }
1392 }
1393 TargetLoweringBase::AddrMode AMNew;
1394 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1395 AMNew.BaseOffs = CombinedImm.getSExtValue();
1396 if (AccessTy) {
1397 AMNew.HasBaseReg = true;
1398 TargetLoweringBase::AddrMode AMOld;
1399 AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
1400 AMOld.HasBaseReg = true;
1401 unsigned AS = MRI.getType(Add2).getAddressSpace();
1402 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1403 if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1404 !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1405 return false;
1406 }
1407
1408 // Pass the combined immediate to the apply function.
1409 MatchInfo.Imm = AMNew.BaseOffs;
1410 MatchInfo.Base = Base;
1411 MatchInfo.Bank = getRegBank(Imm2);
1412 return true;
1413}
1414
1415void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1416 PtrAddChain &MatchInfo) {
1417 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1418 MachineIRBuilder MIB(MI);
1419 LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1420 auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1421 setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
1422 Observer.changingInstr(MI);
1423 MI.getOperand(1).setReg(MatchInfo.Base);
1424 MI.getOperand(2).setReg(NewOffset.getReg(0));
1425 Observer.changedInstr(MI);
1426}
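
For intuition, in plain C++ (my own sketch, not MIR): two chained constant offsets from the same base collapse into a single offset of their sum, which is exactly the constant the rewritten G_PTR_ADD uses.

#include <cassert>
#include <cstdint>

int main() {
  uint8_t Buf[64] = {};
  uint8_t *Base = Buf;
  int64_t Imm1 = 8, Imm2 = 16;
  uint8_t *T1 = Base + Imm1;                // %t1   = G_PTR_ADD %base, imm1
  uint8_t *Root = T1 + Imm2;                // %root = G_PTR_ADD %t1, imm2
  uint8_t *Combined = Base + (Imm1 + Imm2); // %root = G_PTR_ADD %base, (imm1 + imm2)
  assert(Root == Combined);
  return 0;
}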
1427
1428bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1429 RegisterImmPair &MatchInfo) {
1430 // We're trying to match the following pattern with any of
1431 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1432 // %t1 = SHIFT %base, G_CONSTANT imm1
1433 // %root = SHIFT %t1, G_CONSTANT imm2
1434 // -->
1435 // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1436
1437 unsigned Opcode = MI.getOpcode();
1438 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1439 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1440 Opcode == TargetOpcode::G_USHLSAT) &&
1441 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1442
1443 Register Shl2 = MI.getOperand(1).getReg();
1444 Register Imm1 = MI.getOperand(2).getReg();
1445 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1446 if (!MaybeImmVal)
1447 return false;
1448
1449 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1450 if (Shl2Def->getOpcode() != Opcode)
1451 return false;
1452
1453 Register Base = Shl2Def->getOperand(1).getReg();
1454 Register Imm2 = Shl2Def->getOperand(2).getReg();
1455 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1456 if (!MaybeImm2Val)
1457 return false;
1458
1459 // Pass the combined immediate to the apply function.
1460 MatchInfo.Imm =
1461 (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
1462 MatchInfo.Reg = Base;
1463
1464 // There is no simple replacement for a saturating unsigned left shift that
1465 // exceeds the scalar size.
1466 if (Opcode == TargetOpcode::G_USHLSAT &&
1467 MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1468 return false;
1469
1470 return true;
1471}
1472
1473void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1474 RegisterImmPair &MatchInfo) {
1475 unsigned Opcode = MI.getOpcode();
1476 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1477 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1478 Opcode == TargetOpcode::G_USHLSAT) &&
1479 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1480
1481 Builder.setInstrAndDebugLoc(MI);
1482 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1483 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1484 auto Imm = MatchInfo.Imm;
1485
1486 if (Imm >= ScalarSizeInBits) {
1487 // Any logical shift that exceeds scalar size will produce zero.
1488 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1489 Builder.buildConstant(MI.getOperand(0), 0);
1490 MI.eraseFromParent();
1491 return;
1492 }
1493 // Arithmetic shift and saturating signed left shift have no effect beyond
1494 // scalar size.
1495 Imm = ScalarSizeInBits - 1;
1496 }
1497
1498 LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1499 Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1500 Observer.changingInstr(MI);
1501 MI.getOperand(1).setReg(MatchInfo.Reg);
1502 MI.getOperand(2).setReg(NewImm);
1503 Observer.changedInstr(MI);
1504}
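
A standalone sketch in plain C++ on a 32-bit scalar (not MIR) of the cases handled above: chained constant shift amounts add up, a logical shift whose total reaches the bit width becomes the constant zero, and an arithmetic shift is clamped to width minus one.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xF000000Fu;
  // Chained shifts by constants add: (x >> 3) >> 4 == x >> 7 (total < 32).
  assert(((X >> 3) >> 4) == (X >> 7));
  // Logical shifts whose combined amount reaches the width produce zero;
  // shifting by 32 directly would be undefined in C++, so the combiner
  // materialises the constant 0 instead of a shift.
  assert((((X >> 16) >> 15) >> 1) == 0u);
  // Arithmetic shifts saturate: shifting the sign across the whole width is
  // the same as shifting by width - 1 (arithmetic shift of a negative value
  // assumed, as on mainstream targets).
  int32_t S = static_cast<int32_t>(X);
  assert((((S >> 16) >> 15) >> 1) == (S >> 31));
  return 0;
}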
1505
1506bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
1507 ShiftOfShiftedLogic &MatchInfo) {
1508 // We're trying to match the following pattern with any of
1509 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1510 // with any of G_AND/G_OR/G_XOR logic instructions.
1511 // %t1 = SHIFT %X, G_CONSTANT C0
1512 // %t2 = LOGIC %t1, %Y
1513 // %root = SHIFT %t2, G_CONSTANT C1
1514 // -->
1515 // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1516 // %t4 = SHIFT %Y, G_CONSTANT C1
1517 // %root = LOGIC %t3, %t4
1518 unsigned ShiftOpcode = MI.getOpcode();
1519 assert((ShiftOpcode == TargetOpcode::G_SHL ||
1520 ShiftOpcode == TargetOpcode::G_ASHR ||
1521 ShiftOpcode == TargetOpcode::G_LSHR ||
1522 ShiftOpcode == TargetOpcode::G_USHLSAT ||
1523 ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1524 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1525
1526 // Match a one-use bitwise logic op.
1527 Register LogicDest = MI.getOperand(1).getReg();
1528 if (!MRI.hasOneNonDBGUse(LogicDest))
1529 return false;
1530
1531 MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1532 unsigned LogicOpcode = LogicMI->getOpcode();
1533 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1534 LogicOpcode != TargetOpcode::G_XOR)
1535 return false;
1536
1537 // Find a matching one-use shift by constant.
1538 const Register C1 = MI.getOperand(2).getReg();
1539 auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
1540 if (!MaybeImmVal)
1541 return false;
1542
1543 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1544
1545 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1546 // Shift should match previous one and should be a one-use.
1547 if (MI->getOpcode() != ShiftOpcode ||
1548 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1549 return false;
1550
1551 // Must be a constant.
1552 auto MaybeImmVal =
1553 getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1554 if (!MaybeImmVal)
1555 return false;
1556
1557 ShiftVal = MaybeImmVal->Value.getSExtValue();
1558 return true;
1559 };
1560
1561 // Logic ops are commutative, so check each operand for a match.
1562 Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1563 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1564 Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1565 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1566 uint64_t C0Val;
1567
1568 if (matchFirstShift(LogicMIOp1, C0Val)) {
1569 MatchInfo.LogicNonShiftReg = LogicMIReg2;
1570 MatchInfo.Shift2 = LogicMIOp1;
1571 } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1572 MatchInfo.LogicNonShiftReg = LogicMIReg1;
1573 MatchInfo.Shift2 = LogicMIOp2;
1574 } else
1575 return false;
1576
1577 MatchInfo.ValSum = C0Val + C1Val;
1578
1579 // The fold is not valid if the sum of the shift values exceeds bitwidth.
1580 if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1581 return false;
1582
1583 MatchInfo.Logic = LogicMI;
1584 return true;
1585}
1586
1587void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1588 ShiftOfShiftedLogic &MatchInfo) {
1589 unsigned Opcode = MI.getOpcode();
1590 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1591 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1592 Opcode == TargetOpcode::G_SSHLSAT) &&
1593 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1594
1595 LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1596 LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1597 Builder.setInstrAndDebugLoc(MI);
1598
1599 Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1600
1601 Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1602 Register Shift1 =
1603 Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1604
1605 // If LogicNonShiftReg is the same as Shift1Base, and the shift1 constant is
1606 // the same as the MatchInfo.Shift2 constant, CSEMIRBuilder will reuse the old
1607 // shift1 when building shift2. In that case, erasing MatchInfo.Shift2 at the
1608 // end would actually remove the old shift1 and cause a crash later, so erase
1609 // it earlier to avoid the crash.
1610 MatchInfo.Shift2->eraseFromParent();
1611
1612 Register Shift2Const = MI.getOperand(2).getReg();
1613 Register Shift2 = Builder
1614 .buildInstr(Opcode, {DestType},
1615 {MatchInfo.LogicNonShiftReg, Shift2Const})
1616 .getReg(0);
1617
1618 Register Dest = MI.getOperand(0).getReg();
1619 Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1620
1621 // This was one use so it's safe to remove it.
1622 MatchInfo.Logic->eraseFromParent();
1623
1624 MI.eraseFromParent();
1625}
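
A plain-C++ sketch (my own example) of the identity the rewrite relies on: shifting a bitwise op whose first operand was itself shifted by a constant is the same as pushing the outer shift into both operands and summing the two shift amounts.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x12345678u, Y = 0x0F0F0F0Fu;
  unsigned C0 = 3, C1 = 5; // C0 + C1 must stay below the bit width
  // %t1 = shl x, C0 ; %t2 = and %t1, y ; %root = shl %t2, C1
  uint32_t Before = ((X << C0) & Y) << C1;
  // %t3 = shl x, (C0 + C1) ; %t4 = shl y, C1 ; %root = and %t3, %t4
  uint32_t After = (X << (C0 + C1)) & (Y << C1);
  assert(Before == After);
  return 0;
}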
1626
1627bool CombinerHelper::matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) {
1628 assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
1629 // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1630 // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1631 auto &Shl = cast<GenericMachineInstr>(MI);
1632 Register DstReg = Shl.getReg(0);
1633 Register SrcReg = Shl.getReg(1);
1634 Register ShiftReg = Shl.getReg(2);
1635 Register X, C1;
1636
1637 if (!getTargetLowering().isDesirableToCommuteWithShift(MI, !isPreLegalize()))
1638 return false;
1639
1640 if (!mi_match(SrcReg, MRI,
1641 m_OneNonDBGUse(m_any_of(m_GAdd(m_Reg(X), m_Reg(C1)),
1642 m_GOr(m_Reg(X), m_Reg(C1))))))
1643 return false;
1644
1645 APInt C1Val, C2Val;
1646 if (!mi_match(C1, MRI, m_ICstOrSplat(C1Val)) ||
1647 !mi_match(ShiftReg, MRI, m_ICstOrSplat(C2Val)))
1648 return false;
1649
1650 auto *SrcDef = MRI.getVRegDef(SrcReg);
1651 assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
1652 SrcDef->getOpcode() == TargetOpcode::G_OR) && "Unexpected op");
1653 LLT SrcTy = MRI.getType(SrcReg);
1654 MatchInfo = [=](MachineIRBuilder &B) {
1655 auto S1 = B.buildShl(SrcTy, X, ShiftReg);
1656 auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
1657 B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
1658 };
1659 return true;
1660}
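
For intuition, in plain C++ (not MIR): the commuted form distributes the shift over the add or or, pre-shifting the constant. For the add this holds modulo the register width, which matches two's-complement wraparound.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x00001234u;
  uint32_t C1 = 0x00000011u, C2 = 4;
  // (shl (add x, c1), c2) == (add (shl x, c2), c1 << c2)   -- modulo 2^32
  assert(((X + C1) << C2) == ((X << C2) + (C1 << C2)));
  // (shl (or x, c1), c2)  == (or (shl x, c2), c1 << c2)
  assert(((X | C1) << C2) == ((X << C2) | (C1 << C2)));
  return 0;
}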
1661
1662bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1663 unsigned &ShiftVal) {
1664 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1665 auto MaybeImmVal =
1666 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1667 if (!MaybeImmVal)
1668 return false;
1669
1670 ShiftVal = MaybeImmVal->Value.exactLogBase2();
1671 return (static_cast<int32_t>(ShiftVal) != -1);
1672}
1673
1674void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1675 unsigned &ShiftVal) {
1676 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1677 MachineIRBuilder MIB(MI);
1678 LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1679 auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1680 Observer.changingInstr(MI);
1681 MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1682 MI.getOperand(2).setReg(ShiftCst.getReg(0));
1683 Observer.changedInstr(MI);
1684}
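
For intuition (plain C++): multiplying by a power-of-two constant is the same as shifting left by its exact log2, which is the only case the match accepts.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x1234u;
  uint32_t C = 16;                      // power of two: exactLogBase2(16) == 4
  unsigned ShiftVal = 4;
  assert(C != 0 && (C & (C - 1)) == 0); // the power-of-two condition behind exactLogBase2
  assert(X * C == (X << ShiftVal));     // G_MUL x, 16  ->  G_SHL x, 4
  return 0;
}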
1685
1686// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1687bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1688 RegisterImmPair &MatchData) {
1689 assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1690
1691 Register LHS = MI.getOperand(1).getReg();
1692
1693 Register ExtSrc;
1694 if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1695 !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1696 !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1697 return false;
1698
1699 // TODO: Should handle vector splat.
1700 Register RHS = MI.getOperand(2).getReg();
1701 auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1702 if (!MaybeShiftAmtVal)
1703 return false;
1704
1705 if (LI) {
1706 LLT SrcTy = MRI.getType(ExtSrc);
1707
1708 // We only really care about the legality with the shifted value. We can
1709 // pick any type for the constant shift amount, so ask the target what to
1710 // use. Otherwise we would have to guess and hope it is reported as legal.
1711 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1712 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1713 return false;
1714 }
1715
1716 int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
1717 MatchData.Reg = ExtSrc;
1718 MatchData.Imm = ShiftAmt;
1719
1720 unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
1721 return MinLeadingZeros >= ShiftAmt;
1722}
1723
1724void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1725 const RegisterImmPair &MatchData) {
1726 Register ExtSrcReg = MatchData.Reg;
1727 int64_t ShiftAmtVal = MatchData.Imm;
1728
1729 LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1730 Builder.setInstrAndDebugLoc(MI);
1731 auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1732 auto NarrowShift =
1733 Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1734 Builder.buildZExt(MI.getOperand(0), NarrowShift);
1735 MI.eraseFromParent();
1736}
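
A plain-C++ sketch (hypothetical values, not the KnownBits API) of the condition checked above: if the extended source has at least ShiftAmt known leading zero bits, shifting before or after the zero-extension gives the same wide result.

#include <cassert>
#include <cstdint>

int main() {
  uint16_t Src = 0x00FF;  // 8 leading zero bits in the 16-bit source
  unsigned ShiftAmt = 8;  // ShiftAmt <= number of known leading zeros
  // shl (zext x), 8  ==  zext (shl x, 8) when the shift cannot overflow the
  // narrow type.
  uint32_t Wide = static_cast<uint32_t>(Src) << ShiftAmt;
  uint32_t Narrow =
      static_cast<uint32_t>(static_cast<uint16_t>(Src << ShiftAmt));
  assert(Wide == Narrow);
  return 0;
}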
1737
1738bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
1739 Register &MatchInfo) {
1740 GMerge &Merge = cast<GMerge>(MI);
1741 SmallVector<Register, 16> MergedValues;
1742 for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1743 MergedValues.emplace_back(Merge.getSourceReg(I));
1744
1745 auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1746 if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1747 return false;
1748
1749 for (unsigned I = 0; I < MergedValues.size(); ++I)
1750 if (MergedValues[I] != Unmerge->getReg(I))
1751 return false;
1752
1753 MatchInfo = Unmerge->getSourceReg();
1754 return true;
1755}
1756
1757static Register peekThroughBitcast(Register Reg,
1758 const MachineRegisterInfo &MRI) {
1759 while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1760 ;
1761
1762 return Reg;
1763}
1764
1765bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1766 MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1767 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1768 "Expected an unmerge");
1769 auto &Unmerge = cast<GUnmerge>(MI);
1770 Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
1771
1772 auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);
1773 if (!SrcInstr)
1774 return false;
1775
1776 // Check the source type of the merge.
1777 LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
1778 LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
1779 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1780 if (SrcMergeTy != Dst0Ty && !SameSize)
1781 return false;
1782 // They are the same now (modulo a bitcast).
1783 // We can collect all the src registers.
1784 for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
1785 Operands.push_back(SrcInstr->getSourceReg(Idx));
1786 return true;
1787}
1788
1789void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1790 MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1791 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1792 "Expected an unmerge");
1793 assert((MI.getNumOperands() - 1 == Operands.size()) &&
1794 "Not enough operands to replace all defs");
1795 unsigned NumElems = MI.getNumOperands() - 1;
1796
1797 LLT SrcTy = MRI.getType(Operands[0]);
1798 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1799 bool CanReuseInputDirectly = DstTy == SrcTy;
1800 Builder.setInstrAndDebugLoc(MI);
1801 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1802 Register DstReg = MI.getOperand(Idx).getReg();
1803 Register SrcReg = Operands[Idx];
1804
1805 // This combine may run after RegBankSelect, so we need to be aware of
1806 // register banks.
1807 const auto &DstCB = MRI.getRegClassOrRegBank(DstReg);
1808 if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) {
1809 SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0);
1810 MRI.setRegClassOrRegBank(SrcReg, DstCB);
1811 }
1812
1813 if (CanReuseInputDirectly)
1814 replaceRegWith(MRI, DstReg, SrcReg);
1815 else
1816 Builder.buildCast(DstReg, SrcReg);
1817 }
1818 MI.eraseFromParent();
1819}
1820
1821bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1822 SmallVectorImpl<APInt> &Csts) {
1823 unsigned SrcIdx = MI.getNumOperands() - 1;
1824 Register SrcReg = MI.getOperand(SrcIdx).getReg();
1825 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1826 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1827 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1828 return false;
1829 // Break down the big constant in smaller ones.
1830 const MachineOperand &CstVal = SrcInstr->getOperand(1);
1831 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1832 ? CstVal.getCImm()->getValue()
1833 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1834
1835 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1836 unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1837 // Unmerge a constant.
1838 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1839 Csts.emplace_back(Val.trunc(ShiftAmt));
1840 Val = Val.lshr(ShiftAmt);
1841 }
1842
1843 return true;
1844}
1845
1846void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1847 SmallVectorImpl<APInt> &Csts) {
1848 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1849 "Expected an unmerge");
1850 assert((MI.getNumOperands() - 1 == Csts.size()) &&
1851 "Not enough operands to replace all defs");
1852 unsigned NumElems = MI.getNumOperands() - 1;
1853 Builder.setInstrAndDebugLoc(MI);
1854 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1855 Register DstReg = MI.getOperand(Idx).getReg();
1856 Builder.buildConstant(DstReg, Csts[Idx]);
1857 }
1858
1859 MI.eraseFromParent();
1860}
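
As a rough plain-C++ illustration of the loop in the matcher above: the wide constant is split into destination-sized chunks by repeatedly truncating and then logically shifting right, low chunk first.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Val = 0x1122334455667788ull; // the G_CONSTANT being unmerged
  unsigned ShiftAmt = 32;               // size of each destination (Dst0Ty)
  uint32_t Csts[2];
  for (unsigned Idx = 0; Idx != 2; ++Idx) {
    Csts[Idx] = static_cast<uint32_t>(Val); // Val.trunc(ShiftAmt)
    Val >>= ShiftAmt;                       // Val.lshr(ShiftAmt)
  }
  // Low chunk first, matching the order of the unmerge's definitions.
  assert(Csts[0] == 0x55667788u && Csts[1] == 0x11223344u);
  return 0;
}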
1861
1862bool CombinerHelper::matchCombineUnmergeUndef(
1863 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
1864 unsigned SrcIdx = MI.getNumOperands() - 1;
1865 Register SrcReg = MI.getOperand(SrcIdx).getReg();
1866 MatchInfo = [&MI](MachineIRBuilder &B) {
1867 unsigned NumElems = MI.getNumOperands() - 1;
1868 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1869 Register DstReg = MI.getOperand(Idx).getReg();
1870 B.buildUndef(DstReg);
1871 }
1872 };
1873 return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
1874}
1875
1876bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1877 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1878 "Expected an unmerge");
1879 // Check that all the lanes are dead except the first one.
1880 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1881 if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1882 return false;
1883 }
1884 return true;
1885}
1886
1887void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1888 Builder.setInstrAndDebugLoc(MI);
1889 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1890 // Truncating a vector is going to truncate every single lane,
1891 // whereas we want the full lowbits.
1892 // Do the operation on a scalar instead.
1893 LLT SrcTy = MRI.getType(SrcReg);
1894 if (SrcTy.isVector())
1895 SrcReg =
1896 Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1897
1898 Register Dst0Reg = MI.getOperand(0).getReg();
1899 LLT Dst0Ty = MRI.getType(Dst0Reg);
1900 if (Dst0Ty.isVector()) {
1901 auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
1902 Builder.buildCast(Dst0Reg, MIB);
1903 } else
1904 Builder.buildTrunc(Dst0Reg, SrcReg);
1905 MI.eraseFromParent();
1906}
1907
1908bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
1909 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1910 "Expected an unmerge");
1911 Register Dst0Reg = MI.getOperand(0).getReg();
1912 LLT Dst0Ty = MRI.getType(Dst0Reg);
1913 // G_ZEXT on vector applies to each lane, so it will
1914 // affect all destinations. Therefore we won't be able
1915 // to simplify the unmerge to just the first definition.
1916 if (Dst0Ty.isVector())
1917 return false;
1918 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1919 LLT SrcTy = MRI.getType(SrcReg);
1920 if (SrcTy.isVector())
1921 return false;
1922
1923 Register ZExtSrcReg;
1924 if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1925 return false;
1926
1927 // Finally we can replace the first definition with
1928 // a zext of the source if the definition is big enough to hold
1929 // all of ZExtSrc bits.
1930 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1931 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1932}
1933
1934void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
1935 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1936 "Expected an unmerge");
1937
1938 Register Dst0Reg = MI.getOperand(0).getReg();
1939
1940 MachineInstr *ZExtInstr =
1941 MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1942 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1943 "Expecting a G_ZEXT");
1944
1945 Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1946 LLT Dst0Ty = MRI.getType(Dst0Reg);
1947 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1948
1949 Builder.setInstrAndDebugLoc(MI);
1950
1951 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
1952 Builder.buildZExt(Dst0Reg, ZExtSrcReg);
1953 } else {
1954 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1955 "ZExt src doesn't fit in destination");
1956 replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
1957 }
1958
1959 Register ZeroReg;
1960 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1961 if (!ZeroReg)
1962 ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1963 replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1964 }
1965 MI.eraseFromParent();
1966}
1967
1968bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1969 unsigned TargetShiftSize,
1970 unsigned &ShiftVal) {
1971 assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1972 MI.getOpcode() == TargetOpcode::G_LSHR ||
1973 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1974
1975 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1976 if (Ty.isVector()) // TODO:
1977 return false;
1978
1979 // Don't narrow further than the requested size.
1980 unsigned Size = Ty.getSizeInBits();
1981 if (Size <= TargetShiftSize)
1982 return false;
1983
1984 auto MaybeImmVal =
1985 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1986 if (!MaybeImmVal)
1987 return false;
1988
1989 ShiftVal = MaybeImmVal->Value.getSExtValue();
1990 return ShiftVal >= Size / 2 && ShiftVal < Size;
1991}
1992
1993void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1994 const unsigned &ShiftVal) {
1995 Register DstReg = MI.getOperand(0).getReg();
1996 Register SrcReg = MI.getOperand(1).getReg();
1997 LLT Ty = MRI.getType(SrcReg);
1998 unsigned Size = Ty.getSizeInBits();
1999 unsigned HalfSize = Size / 2;
2000 assert(ShiftVal >= HalfSize);
2001
2002 LLT HalfTy = LLT::scalar(HalfSize);
2003
2004 Builder.setInstr(MI);
2005 auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
2006 unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2007
2008 if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2009 Register Narrowed = Unmerge.getReg(1);
2010
2011 // dst = G_LSHR s64:x, C for C >= 32
2012 // =>
2013 // lo, hi = G_UNMERGE_VALUES x
2014 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
2015
2016 if (NarrowShiftAmt != 0) {
2017 Narrowed = Builder.buildLShr(HalfTy, Narrowed,
2018 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2019 }
2020
2021 auto Zero = Builder.buildConstant(HalfTy, 0);
2022 Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
2023 } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
2024 Register Narrowed = Unmerge.getReg(0);
2025 // dst = G_SHL s64:x, C for C >= 32
2026 // =>
2027 // lo, hi = G_UNMERGE_VALUES x
2028 // dst = G_MERGE_VALUES 0, (G_SHL lo, C - 32)
2029 if (NarrowShiftAmt != 0) {
2030 Narrowed = Builder.buildShl(HalfTy, Narrowed,
2031 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2032 }
2033
2034 auto Zero = Builder.buildConstant(HalfTy, 0);
2035 Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});
2036 } else {
2037 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2038 auto Hi = Builder.buildAShr(
2039 HalfTy, Unmerge.getReg(1),
2040 Builder.buildConstant(HalfTy, HalfSize - 1));
2041
2042 if (ShiftVal == HalfSize) {
2043 // (G_ASHR i64:x, 32) ->
2044 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
2045 Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
2046 } else if (ShiftVal == Size - 1) {
2047 // Don't need a second shift.
2048 // (G_ASHR i64:x, 63) ->
2049 // %narrowed = (G_ASHR hi_32(x), 31)
2050 // G_MERGE_VALUES %narrowed, %narrowed
2051 Builder.buildMergeLikeInstr(DstReg, {Hi, Hi});
2052 } else {
2053 auto Lo = Builder.buildAShr(
2054 HalfTy, Unmerge.getReg(1),
2055 Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
2056
2057 // (G_ASHR i64:x, C) ->, for C >= 32
2058 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
2059 Builder.buildMergeLikeInstr(DstReg, {Lo, Hi});
2060 }
2061 }
2062
2063 MI.eraseFromParent();
2064}
2065
2066bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
2067 unsigned TargetShiftAmount) {
2068 unsigned ShiftAmt;
2069 if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
2070 applyCombineShiftToUnmerge(MI, ShiftAmt);
2071 return true;
2072 }
2073
2074 return false;
2075}
2076
2077bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2078 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2079 Register DstReg = MI.getOperand(0).getReg();
2080 LLT DstTy = MRI.getType(DstReg);
2081 Register SrcReg = MI.getOperand(1).getReg();
2082 return mi_match(SrcReg, MRI,
2083 m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2084}
2085
2086void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2087 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2088 Register DstReg = MI.getOperand(0).getReg();
2089 Builder.setInstr(MI);
2090 Builder.buildCopy(DstReg, Reg);
2091 MI.eraseFromParent();
2092}
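// Illustrative sketch of the inttoptr(ptrtoint x) fold above (hypothetical
// registers, assuming a p0 pointer type):
//   %int:_(s64) = G_PTRTOINT %ptr:_(p0)
//   %rt:_(p0) = G_INTTOPTR %int(s64)
// matchCombineI2PToP2I binds Reg to %ptr, and the apply step rewrites the
// G_INTTOPTR as %rt:_(p0) = COPY %ptr(p0).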
2093
2094void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2095 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2096 Register DstReg = MI.getOperand(0).getReg();
2097 Builder.setInstr(MI);
2098 Builder.buildZExtOrTrunc(DstReg, Reg);
2099 MI.eraseFromParent();
2100}
2101
2102bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2103 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2104 assert(MI.getOpcode() == TargetOpcode::G_ADD);
2105 Register LHS = MI.getOperand(1).getReg();
2106 Register RHS = MI.getOperand(2).getReg();
2107 LLT IntTy = MRI.getType(LHS);
2108
2109 // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2110 // instruction.
2111 PtrReg.second = false;
2112 for (Register SrcReg : {LHS, RHS}) {
2113 if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
2114 // Don't handle cases where the integer is implicitly converted to the
2115 // pointer width.
2116 LLT PtrTy = MRI.getType(PtrReg.first);
2117 if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2118 return true;
2119 }
2120
2121 PtrReg.second = true;
2122 }
2123
2124 return false;
2125}
2126
2127void CombinerHelper::applyCombineAddP2IToPtrAdd(
2128 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2129 Register Dst = MI.getOperand(0).getReg();
2130 Register LHS = MI.getOperand(1).getReg();
2131 Register RHS = MI.getOperand(2).getReg();
2132
2133 const bool DoCommute = PtrReg.second;
2134 if (DoCommute)
2135 std::swap(LHS, RHS);
2136 LHS = PtrReg.first;
2137
2138 LLT PtrTy = MRI.getType(LHS);
2139
2140 Builder.setInstrAndDebugLoc(MI);
2141 auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2142 Builder.buildPtrToInt(Dst, PtrAdd);
2143 MI.eraseFromParent();
2144}
2145
2146bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2147 APInt &NewCst) {
2148 auto &PtrAdd = cast<GPtrAdd>(MI);
2149 Register LHS = PtrAdd.getBaseReg();
2150 Register RHS = PtrAdd.getOffsetReg();
2151 MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2152
2153 if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
2154 APInt Cst;
2155 if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2156 auto DstTy = MRI.getType(PtrAdd.getReg(0));
2157 // G_INTTOPTR uses zero-extension
2158 NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
2159 NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
2160 return true;
2161 }
2162 }
2163
2164 return false;
2165}
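// Illustrative sketch of the constant fold above (hypothetical constants):
//   %c:_(s64) = G_CONSTANT i64 16
//   %base:_(p0) = G_INTTOPTR %c(s64)
//   %off:_(s64) = G_CONSTANT i64 -8
//   %sum:_(p0) = G_PTR_ADD %base, %off(s64)
// NewCst is computed as zext(16) + sext(-8) = 8, and the apply step below
// replaces the G_PTR_ADD with that constant.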
2166
2167void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2168 APInt &NewCst) {
2169 auto &PtrAdd = cast<GPtrAdd>(MI);
2170 Register Dst = PtrAdd.getReg(0);
2171
2172 Builder.setInstrAndDebugLoc(MI);
2173 Builder.buildConstant(Dst, NewCst);
2174 PtrAdd.eraseFromParent();
2175}
2176
2177bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2178 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2179 Register DstReg = MI.getOperand(0).getReg();
2180 Register SrcReg = MI.getOperand(1).getReg();
2181 LLT DstTy = MRI.getType(DstReg);
2182 return mi_match(SrcReg, MRI,
2183 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2184}
2185
2186bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
2187 assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2188 Register DstReg = MI.getOperand(0).getReg();
2189 Register SrcReg = MI.getOperand(1).getReg();
2190 LLT DstTy = MRI.getType(DstReg);
2191 if (mi_match(SrcReg, MRI,
2192 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2193 unsigned DstSize = DstTy.getScalarSizeInBits();
2194 unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2195 return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2196 }
2197 return false;
2198}
2199
2200bool CombinerHelper::matchCombineExtOfExt(
2201 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2202 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2203 MI.getOpcode() == TargetOpcode::G_SEXT ||
2204 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2205 "Expected a G_[ASZ]EXT");
2206 Register SrcReg = MI.getOperand(1).getReg();
2207 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2208 // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2209 unsigned Opc = MI.getOpcode();
2210 unsigned SrcOpc = SrcMI->getOpcode();
2211 if (Opc == SrcOpc ||
2212 (Opc == TargetOpcode::G_ANYEXT &&
2213 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2214 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2215 MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2216 return true;
2217 }
2218 return false;
2219}
2220
2221void CombinerHelper::applyCombineExtOfExt(
2222 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2223 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2224 MI.getOpcode() == TargetOpcode::G_SEXT ||
2225 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2226 "Expected a G_[ASZ]EXT");
2227
2228 Register Reg = std::get<0>(MatchInfo);
2229 unsigned SrcExtOp = std::get<1>(MatchInfo);
2230
2231 // Combine exts with the same opcode.
2232 if (MI.getOpcode() == SrcExtOp) {
2233 Observer.changingInstr(MI);
2234 MI.getOperand(1).setReg(Reg);
2235 Observer.changedInstr(MI);
2236 return;
2237 }
2238
2239 // Combine:
2240 // - anyext([sz]ext x) to [sz]ext x
2241 // - sext(zext x) to zext x
2242 if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2243 (MI.getOpcode() == TargetOpcode::G_SEXT &&
2244 SrcExtOp == TargetOpcode::G_ZEXT)) {
2245 Register DstReg = MI.getOperand(0).getReg();
2246 Builder.setInstrAndDebugLoc(MI);
2247 Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2248 MI.eraseFromParent();
2249 }
2250}
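// Illustrative sketch of the ext-of-ext fold above (hypothetical types):
//   %w:_(s16) = G_ZEXT %b:_(s8)
//   %d:_(s32) = G_ANYEXT %w(s16)
// combines to a single extension of the inner source:
//   %d:_(s32) = G_ZEXT %b(s8)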
2251
2252void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2253 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2254 Register DstReg = MI.getOperand(0).getReg();
2255 Register SrcReg = MI.getOperand(1).getReg();
2256 LLT DstTy = MRI.getType(DstReg);
2257
2258 Builder.setInstrAndDebugLoc(MI);
2259 Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2260 MI.getFlags());
2261 MI.eraseFromParent();
2262}
2263
2264bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
2265 BuildFnTy &MatchInfo) {
2266 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2267 Register Src = MI.getOperand(1).getReg();
2268 Register NegSrc;
2269
2270 if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc))))
2271 return false;
2272
2273 MatchInfo = [=, &MI](MachineIRBuilder &B) {
2274 Observer.changingInstr(MI);
2275 MI.getOperand(1).setReg(NegSrc);
2276 Observer.changedInstr(MI);
2277 };
2278 return true;
2279}
2280
2281bool CombinerHelper::matchCombineTruncOfExt(
2282 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2283 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2284 Register SrcReg = MI.getOperand(1).getReg();
2285 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2286 unsigned SrcOpc = SrcMI->getOpcode();
2287 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2288 SrcOpc == TargetOpcode::G_ZEXT) {
2289 MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2290 return true;
2291 }
2292 return false;
2293}
2294
2295void CombinerHelper::applyCombineTruncOfExt(
2296 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2297 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2298 Register SrcReg = MatchInfo.first;
2299 unsigned SrcExtOp = MatchInfo.second;
2300 Register DstReg = MI.getOperand(0).getReg();
2301 LLT SrcTy = MRI.getType(SrcReg);
2302 LLT DstTy = MRI.getType(DstReg);
2303 if (SrcTy == DstTy) {
2304 MI.eraseFromParent();
2305 replaceRegWith(MRI, DstReg, SrcReg);
2306 return;
2307 }
2308 Builder.setInstrAndDebugLoc(MI);
2309 if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2310 Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2311 else
2312 Builder.buildTrunc(DstReg, SrcReg);
2313 MI.eraseFromParent();
2314}
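// Illustrative sketch of the trunc(ext x) cases above (hypothetical types): for
// %t:_(s16) = G_TRUNC of (G_SEXT %b:_(s8)), the s8 source is narrower than the
// s16 destination, so this re-emits %t = G_SEXT %b; an s16 source would simply
// replace %t, and a wider source would be truncated directly instead.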
2315
2316static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
2317 const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
2318 const unsigned TruncSize = TruncTy.getScalarSizeInBits();
2319
2320 // ShiftTy > 32 > TruncTy -> 32
2321 if (ShiftSize > 32 && TruncSize < 32)
2322 return ShiftTy.changeElementSize(32);
2323
2324 // TODO: We could also reduce to 16 bits, but that's more target-dependent.
2325 // Some targets like it, some don't, some only like it under certain
2326 // conditions/processor versions, etc.
2327 // A TL hook might be needed for this.
2328
2329 // Don't combine
2330 return ShiftTy;
2331}
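// Illustrative example of the helper above (hypothetical widths): an s64 shift
// truncated down to s16 satisfies ShiftSize (64) > 32 > TruncSize (16), so the
// intermediate shift type becomes s32; truncating s64 to s32 returns the s64
// type unchanged, meaning no combine is performed.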
2332
2333bool CombinerHelper::matchCombineTruncOfShift(
2334 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
2335 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2336 Register DstReg = MI.getOperand(0).getReg();
2337 Register SrcReg = MI.getOperand(1).getReg();
2338
2339 if (!MRI.hasOneNonDBGUse(SrcReg))
2340 return false;
2341
2342 LLT SrcTy = MRI.getType(SrcReg);
2343 LLT DstTy = MRI.getType(DstReg);
2344
2345 MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI);
2346 const auto &TL = getTargetLowering();
2347
2348 LLT NewShiftTy;
2349 switch (SrcMI->getOpcode()) {
2350 default:
2351 return false;
2352 case TargetOpcode::G_SHL: {
2353 NewShiftTy = DstTy;
2354
2355 // Make sure new shift amount is legal.
2356 KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
2357 if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
2358 return false;
2359 break;
2360 }
2361 case TargetOpcode::G_LSHR:
2362 case TargetOpcode::G_ASHR: {
2363 // For right shifts, we conservatively do not do the transform if the TRUNC
2364 // has any STORE users. The reason is that if we change the type of the
2365 // shift, we may break the truncstore combine.
2366 //
2367 // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
2368 for (auto &User : MRI.use_instructions(DstReg))
2369 if (User.getOpcode() == TargetOpcode::G_STORE)
2370 return false;
2371
2372 NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy);
2373 if (NewShiftTy == SrcTy)
2374 return false;
2375
2376 // Make sure we won't lose information by truncating the high bits.
2377 KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
2378 if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
2379 DstTy.getScalarSizeInBits()))
2380 return false;
2381 break;
2382 }
2383 }
2384
2385 if (!isLegalOrBeforeLegalizer(
2386 {SrcMI->getOpcode(),
2387 {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
2388 return false;
2389
2390 MatchInfo = std::make_pair(SrcMI, NewShiftTy);
2391 return true;
2392}
2393
2394void CombinerHelper::applyCombineTruncOfShift(
2395 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
2396 Builder.setInstrAndDebugLoc(MI);
2397
2398 MachineInstr *ShiftMI = MatchInfo.first;
2399 LLT NewShiftTy = MatchInfo.second;
2400
2401 Register Dst = MI.getOperand(0).getReg();
2402 LLT DstTy = MRI.getType(Dst);
2403
2404 Register ShiftAmt = ShiftMI->getOperand(2).getReg();
2405 Register ShiftSrc = ShiftMI->getOperand(1).getReg();
2406 ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);
2407
2408 Register NewShift =
2409 Builder
2410 .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})
2411 .getReg(0);
2412
2413 if (NewShiftTy == DstTy)
2414 replaceRegWith(MRI, Dst, NewShift);
2415 else
2416 Builder.buildTrunc(Dst, NewShift);
2417
2418 eraseInst(MI);
2419}
2420
2421bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2422 return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2423 return MO.isReg() &&
2424 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2425 });
2426}
2427
2428bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2429 return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2430 return !MO.isReg() ||
2431 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2432 });
2433}
2434
2435bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2436 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2437 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2438 return all_of(Mask, [](int Elt) { return Elt < 0; });
2439}
2440
2441bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2442 assert(MI.getOpcode() == TargetOpcode::G_STORE);
2443 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2444 MRI);
2445}
2446
2447bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2448 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2449 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2450 MRI);
2451}
2452
2453bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) {
2454 assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
2455 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
2456 "Expected an insert/extract element op");
2457 LLT VecTy = MRI.getType(MI.getOperand(1).getReg());
2458 unsigned IdxIdx =
2459 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
2460 auto Idx = getIConstantVRegVal(MI.getOperand(IdxIdx).getReg(), MRI);
2461 if (!Idx)
2462 return false;
2463 return Idx->getZExtValue() >= VecTy.getNumElements();
2464}
2465
2466bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2467 GSelect &SelMI = cast<GSelect>(MI);
2468 auto Cst =
2469 isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
2470 if (!Cst)
2471 return false;
2472 OpIdx = Cst->isZero() ? 3 : 2;
2473 return true;
2474}
2475
2476bool CombinerHelper::eraseInst(MachineInstr &MI) {
2477 MI.eraseFromParent();
2478 return true;
2479}
2480
2481bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2482 const MachineOperand &MOP2) {
2483 if (!MOP1.isReg() || !MOP2.isReg())
2484 return false;
2485 auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
2486 if (!InstAndDef1)
2487 return false;
2488 auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
2489 if (!InstAndDef2)
2490 return false;
2491 MachineInstr *I1 = InstAndDef1->MI;
2492 MachineInstr *I2 = InstAndDef2->MI;
2493
2494 // Handle a case like this:
2495 //
2496 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2497 //
2498 // Even though %0 and %1 are produced by the same instruction they are not
2499 // the same values.
2500 if (I1 == I2)
2501 return MOP1.getReg() == MOP2.getReg();
2502
2503 // If we have an instruction which loads or stores, we can't guarantee that
2504 // it is identical.
2505 //
2506 // For example, we may have
2507 //
2508 // %x1 = G_LOAD %addr (load N from @somewhere)
2509 // ...
2510 // call @foo
2511 // ...
2512 // %x2 = G_LOAD %addr (load N from @somewhere)
2513 // ...
2514 // %or = G_OR %x1, %x2
2515 //
2516 // It's possible that @foo will modify whatever lives at the address we're
2517 // loading from. To be safe, let's just assume that all loads and stores
2518 // are different (unless we have something which is guaranteed to not
2519 // change.)
2520 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
2521 return false;
2522
2523 // If both instructions are loads or stores, they are equal only if both
2524 // are dereferenceable invariant loads with the same number of bits.
2525 if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2526 GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
2527 GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
2528 if (!LS1 || !LS2)
2529 return false;
2530
2531 if (!I2->isDereferenceableInvariantLoad() ||
2532 (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2533 return false;
2534 }
2535
2536 // Check for physical registers on the instructions first to avoid cases
2537 // like this:
2538 //
2539 // %a = COPY $physreg
2540 // ...
2541 // SOMETHING implicit-def $physreg
2542 // ...
2543 // %b = COPY $physreg
2544 //
2545 // These copies are not equivalent.
2546 if (any_of(I1->uses(), [](const MachineOperand &MO) {
2547 return MO.isReg() && MO.getReg().isPhysical();
2548 })) {
2549 // Check if we have a case like this:
2550 //
2551 // %a = COPY $physreg
2552 // %b = COPY %a
2553 //
2554 // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2555 // From that, we know that they must have the same value, since they must
2556 // have come from the same COPY.
2557 return I1->isIdenticalTo(*I2);
2558 }
2559
2560 // We don't have any physical registers, so we don't necessarily need the
2561 // same vreg defs.
2562 //
2563 // On the off-chance that there's some target instruction feeding into the
2564 // instruction, let's use produceSameValue instead of isIdenticalTo.
2565 if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
2566 // Handle instructions with multiple defs that produce the same values. The
2567 // values are the same for operands with the same index.
2568 // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2569 // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2570 // I1 and I2 are different instructions but produce the same values:
2571 // %1 and %6 are the same, while %1 and %7 are not.
2572 return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2573 I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2574 }
2575 return false;
2576}
2577
2578bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2579 if (!MOP.isReg())
2580 return false;
2581 auto *MI = MRI.getVRegDef(MOP.getReg());
2582 auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
2583 return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2584 MaybeCst->getSExtValue() == C;
2585}
2586
2587bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2588 unsigned OpIdx) {
2589 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2590 Register OldReg = MI.getOperand(0).getReg();
2591 Register Replacement = MI.getOperand(OpIdx).getReg();
2592 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2593 MI.eraseFromParent();
2594 replaceRegWith(MRI, OldReg, Replacement);
2595 return true;
2596}
2597
2598bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2599 Register Replacement) {
2600 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2601 Register OldReg = MI.getOperand(0).getReg();
2602 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2603 MI.eraseFromParent();
2604 replaceRegWith(MRI, OldReg, Replacement);
2605 return true;
2606}
2607
2608bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2609 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2610 // Match (cond ? x : x)
2611 return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2612 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2613 MRI);
2614}
2615
2616bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2617 return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2618 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2619 MRI);
2620}
2621
2622bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2623 return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2624 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2625 MRI);
2626}
2627
2628bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2629 MachineOperand &MO = MI.getOperand(OpIdx);
2630 return MO.isReg() &&
2631 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2632}
2633
2634bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
2635 unsigned OpIdx) {
2636 MachineOperand &MO = MI.getOperand(OpIdx);
2637 return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2638}
2639
2640bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2641 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2642 Builder.setInstr(MI);
2643 Builder.buildFConstant(MI.getOperand(0), C);
2644 MI.eraseFromParent();
2645 return true;
2646}
2647
2648bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2649 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2650 Builder.setInstr(MI);
2651 Builder.buildConstant(MI.getOperand(0), C);
2652 MI.eraseFromParent();
2653 return true;
2654}
2655
2656bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
2657 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2658 Builder.setInstr(MI);
2659 Builder.buildConstant(MI.getOperand(0), C);
2660 MI.eraseFromParent();
2661 return true;
2662}
2663
2664bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2665 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2666 Builder.setInstr(MI);
2667 Builder.buildUndef(MI.getOperand(0));
2668 MI.eraseFromParent();
2669 return true;
2670}
2671
2672bool CombinerHelper::matchSimplifyAddToSub(
2673 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2674 Register LHS = MI.getOperand(1).getReg();
2675 Register RHS = MI.getOperand(2).getReg();
2676 Register &NewLHS = std::get<0>(MatchInfo);
2677 Register &NewRHS = std::get<1>(MatchInfo);
2678
2679 // Helper lambda to check for opportunities for
2680 // ((0-A) + B) -> B - A
2681 // (A + (0-B)) -> A - B
2682 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2683 if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2684 return false;
2685 NewLHS = MaybeNewLHS;
2686 return true;
2687 };
2688
2689 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2690}
2691
2692bool CombinerHelper::matchCombineInsertVecElts(
2693 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2694 assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2695 "Invalid opcode");
2696 Register DstReg = MI.getOperand(0).getReg();
2697 LLT DstTy = MRI.getType(DstReg);
2698 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2699 unsigned NumElts = DstTy.getNumElements();
2700 // If this MI is part of a sequence of insert_vec_elts, then
2701 // don't do the combine in the middle of the sequence.
2702 if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2703 TargetOpcode::G_INSERT_VECTOR_ELT)
2704 return false;
2705 MachineInstr *CurrInst = &MI;
2706 MachineInstr *TmpInst;
2707 int64_t IntImm;
2708 Register TmpReg;
2709 MatchInfo.resize(NumElts);
2710 while (mi_match(
2711 CurrInst->getOperand(0).getReg(), MRI,
2712 m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
2713 if (IntImm >= NumElts || IntImm < 0)
2714 return false;
2715 if (!MatchInfo[IntImm])
2716 MatchInfo[IntImm] = TmpReg;
2717 CurrInst = TmpInst;
2718 }
2719 // Variable index.
2720 if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2721 return false;
2722 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2723 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2724 if (!MatchInfo[I - 1].isValid())
2725 MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2726 }
2727 return true;
2728 }
2729 // If we didn't end in a G_IMPLICIT_DEF, bail out.
2730 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2731}
2732
2733void CombinerHelper::applyCombineInsertVecElts(
2734 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2735 Builder.setInstr(MI);
2736 Register UndefReg;
2737 auto GetUndef = [&]() {
2738 if (UndefReg)
2739 return UndefReg;
2740 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2741 UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2742 return UndefReg;
2743 };
2744 for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2745 if (!MatchInfo[I])
2746 MatchInfo[I] = GetUndef();
2747 }
2748 Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2749 MI.eraseFromParent();
2750}
2751
2752void CombinerHelper::applySimplifyAddToSub(
2753 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2754 Builder.setInstr(MI);
2755 Register SubLHS, SubRHS;
2756 std::tie(SubLHS, SubRHS) = MatchInfo;
2757 Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2758 MI.eraseFromParent();
2759}
2760
2761bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2762 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2763 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2764 //
2765 // Creates the new hand + logic instructions (but does not insert them).
2766 //
2767 // On success, MatchInfo is populated with the new instructions. These are
2768 // inserted in applyHoistLogicOpWithSameOpcodeHands.
2769 unsigned LogicOpcode = MI.getOpcode();
2770 assert(LogicOpcode == TargetOpcode::G_AND ||
2771 LogicOpcode == TargetOpcode::G_OR ||
2772 LogicOpcode == TargetOpcode::G_XOR);
2773 MachineIRBuilder MIB(MI);
2774 Register Dst = MI.getOperand(0).getReg();
2775 Register LHSReg = MI.getOperand(1).getReg();
2776 Register RHSReg = MI.getOperand(2).getReg();
2777
2778 // Don't recompute anything.
2779 if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2780 return false;
2781
2782 // Make sure we have (hand x, ...), (hand y, ...)
2783 MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2784 MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2785 if (!LeftHandInst || !RightHandInst)
2786 return false;
2787 unsigned HandOpcode = LeftHandInst->getOpcode();
2788 if (HandOpcode != RightHandInst->getOpcode())
2789 return false;
2790 if (!LeftHandInst->getOperand(1).isReg() ||
2791 !RightHandInst->getOperand(1).isReg())
2792 return false;
2793
2794 // Make sure the types match up, and if we're doing this post-legalization,
2795 // we end up with legal types.
2796 Register X = LeftHandInst->getOperand(1).getReg();
2797 Register Y = RightHandInst->getOperand(1).getReg();
2798 LLT XTy = MRI.getType(X);
2799 LLT YTy = MRI.getType(Y);
2800 if (!XTy.isValid() || XTy != YTy)
2801 return false;
2802
2803 // Optional extra source register.
2804 Register ExtraHandOpSrcReg;
2805 switch (HandOpcode) {
2806 default:
2807 return false;
2808 case TargetOpcode::G_ANYEXT:
2809 case TargetOpcode::G_SEXT:
2810 case TargetOpcode::G_ZEXT: {
2811 // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2812 break;
2813 }
2814 case TargetOpcode::G_AND:
2815 case TargetOpcode::G_ASHR:
2816 case TargetOpcode::G_LSHR:
2817 case TargetOpcode::G_SHL: {
2818 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2819 MachineOperand &ZOp = LeftHandInst->getOperand(2);
2820 if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2821 return false;
2822 ExtraHandOpSrcReg = ZOp.getReg();
2823 break;
2824 }
2825 }
2826
2827 if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2828 return false;
2829
2830 // Record the steps to build the new instructions.
2831 //
2832 // Steps to build (logic x, y)
2833 auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2834 OperandBuildSteps LogicBuildSteps = {
2835 [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2836 [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2837 [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2838 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2839
2840 // Steps to build hand (logic x, y), ...z
2841 OperandBuildSteps HandBuildSteps = {
2842 [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2843 [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2844 if (ExtraHandOpSrcReg.isValid())
2845 HandBuildSteps.push_back(
2846 [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2847 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2848
2849 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2850 return true;
2851}
2852
2853void CombinerHelper::applyBuildInstructionSteps(
2854 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2855 assert(MatchInfo.InstrsToBuild.size() &&
2856 "Expected at least one instr to build?");
2857 Builder.setInstr(MI);
2858 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2859 assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2860 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2861 MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2862 for (auto &OperandFn : InstrToBuild.OperandFns)
2863 OperandFn(Instr);
2864 }
2865 MI.eraseFromParent();
2866}
2867
2868bool CombinerHelper::matchAshrShlToSextInreg(
2869 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2870 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2871 int64_t ShlCst, AshrCst;
2872 Register Src;
2873 if (!mi_match(MI.getOperand(0).getReg(), MRI,
2874 m_GAShr(m_GShl(m_Reg(Src), m_ICstOrSplat(ShlCst)),
2875 m_ICstOrSplat(AshrCst))))
2876 return false;
2877 if (ShlCst != AshrCst)
2878 return false;
2879 if (!isLegalOrBeforeLegalizer(
2880 {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2881 return false;
2882 MatchInfo = std::make_tuple(Src, ShlCst);
2883 return true;
2884}
2885
2886void CombinerHelper::applyAshShlToSextInreg(
2887 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2888 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2889 Register Src;
2890 int64_t ShiftAmt;
2891 std::tie(Src, ShiftAmt) = MatchInfo;
2892 unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2893 Builder.setInstrAndDebugLoc(MI);
2894 Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2895 MI.eraseFromParent();
2896}
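// Illustrative sketch of the shl/ashr-to-sext_inreg fold above (hypothetical
// 32-bit values):
//   %s:_(s32) = G_SHL %x, 24
//   %a:_(s32) = G_ASHR %s, 24
// becomes a sign extension of the low Size - ShiftAmt = 8 bits:
//   %a:_(s32) = G_SEXT_INREG %x, 8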
2897
2898/// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
2899bool CombinerHelper::matchOverlappingAnd(
2900 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
2901 assert(MI.getOpcode() == TargetOpcode::G_AND);
2902
2903 Register Dst = MI.getOperand(0).getReg();
2904 LLT Ty = MRI.getType(Dst);
2905
2906 Register R;
2907 int64_t C1;
2908 int64_t C2;
2909 if (!mi_match(
2910 Dst, MRI,
2911 m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
2912 return false;
2913
2914 MatchInfo = [=](MachineIRBuilder &B) {
2915 if (C1 & C2) {
2916 B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
2917 return;
2918 }
2919 auto Zero = B.buildConstant(Ty, 0);
2920 replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
2921 };
2922 return true;
2923}
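// Illustrative example of the overlapping-and fold above (hypothetical masks):
// and(and(%x, 0xFF), 0x0F) has C1 & C2 = 0x0F, so it is rebuilt as
// and(%x, 0x0F); with disjoint masks such as 0xF0 and 0x0F the result is
// replaced by the constant 0 instead.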
2924
2925bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
2926 Register &Replacement) {
2927 // Given
2928 //
2929 // %y:_(sN) = G_SOMETHING
2930 // %x:_(sN) = G_SOMETHING
2931 // %res:_(sN) = G_AND %x, %y
2932 //
2933 // Eliminate the G_AND when it is known that x & y == x or x & y == y.
2934 //
2935 // Patterns like this can appear as a result of legalization. E.g.
2936 //
2937 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2938 // %one:_(s32) = G_CONSTANT i32 1
2939 // %and:_(s32) = G_AND %cmp, %one
2940 //
2941 // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2942 assert(MI.getOpcode() == TargetOpcode::G_AND);
2943 if (!KB)
2944 return false;
2945
2946 Register AndDst = MI.getOperand(0).getReg();
2947 Register LHS = MI.getOperand(1).getReg();
2948 Register RHS = MI.getOperand(2).getReg();
2949 KnownBits LHSBits = KB->getKnownBits(LHS);
2950 KnownBits RHSBits = KB->getKnownBits(RHS);
2951
2952 // Check that x & Mask == x.
2953 // x & 1 == x, always
2954 // x & 0 == x, only if x is also 0
2955 // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
2956 //
2957 // Check if we can replace AndDst with the LHS of the G_AND
2958 if (canReplaceReg(AndDst, LHS, MRI) &&
2959 (LHSBits.Zero | RHSBits.One).isAllOnes()) {
2960 Replacement = LHS;
2961 return true;
2962 }
2963
2964 // Check if we can replace AndDst with the RHS of the G_AND
2965 if (canReplaceReg(AndDst, RHS, MRI) &&
2966 (LHSBits.One | RHSBits.Zero).isAllOnes()) {
2967 Replacement = RHS;
2968 return true;
2969 }
2970
2971 return false;
2972}
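// Illustrative example of the known-bits check above (hypothetical values,
// assuming the target's G_ICMP produces a zero-or-one s32 result): for
// %and:_(s32) = G_AND %cmp, %one with %one = G_CONSTANT i32 1, LHSBits.Zero is
// 0xfffffffe and RHSBits.One is 0x1, so their OR is all ones and %and can be
// replaced by %cmp.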
2973
2974bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
2975 // Given
2976 //
2977 // %y:_(sN) = G_SOMETHING
2978 // %x:_(sN) = G_SOMETHING
2979 // %res:_(sN) = G_OR %x, %y
2980 //
2981 // Eliminate the G_OR when it is known that x | y == x or x | y == y.
2982 assert(MI.getOpcode() == TargetOpcode::G_OR);
2983 if (!KB)
2984 return false;
2985
2986 Register OrDst = MI.getOperand(0).getReg();
2987 Register LHS = MI.getOperand(1).getReg();
2988 Register RHS = MI.getOperand(2).getReg();
2989 KnownBits LHSBits = KB->getKnownBits(LHS);
2990 KnownBits RHSBits = KB->getKnownBits(RHS);
2991
2992 // Check that x | Mask == x.
2993 // x | 0 == x, always
2994 // x | 1 == x, only if x is also 1
2995 // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
2996 //
2997 // Check if we can replace OrDst with the LHS of the G_OR
2998 if (canReplaceReg(OrDst, LHS, MRI) &&
2999 (LHSBits.One | RHSBits.Zero).isAllOnes()) {
3000 Replacement = LHS;
3001 return true;
3002 }
3003
3004 // Check if we can replace OrDst with the RHS of the G_OR
3005 if (canReplaceReg(OrDst, RHS, MRI) &&
3006 (LHSBits.Zero | RHSBits.One).isAllOnes()) {
3007 Replacement = RHS;
3008 return true;
3009 }
3010
3011 return false;
3012}
3013
3014bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
3015 // If the input is already sign extended, just drop the extension.
3016 Register Src = MI.getOperand(1).getReg();
3017 unsigned ExtBits = MI.getOperand(2).getImm();
3018 unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
3019 return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
3020}
3021
3022static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3023 int64_t Cst, bool IsVector, bool IsFP) {
3024 // For i1, Cst will always be -1 regardless of boolean contents.
3025 return (ScalarSizeBits == 1 && Cst == -1) ||
3026 isConstTrueVal(TLI, Cst, IsVector, IsFP);
3027}
3028
3029bool CombinerHelper::matchNotCmp(MachineInstr &MI,
3030 SmallVectorImpl<Register> &RegsToNegate) {
3031 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3032 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3033 const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3034 Register XorSrc;
3035 Register CstReg;
3036 // We match xor(src, true) here.
3037 if (!mi_match(MI.getOperand(0).getReg(), MRI,
3038 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
3039 return false;
3040
3041 if (!MRI.hasOneNonDBGUse(XorSrc))
3042 return false;
3043
3044 // Check that XorSrc is the root of a tree of comparisons combined with ANDs
3045 // and ORs. The suffix of RegsToNegate starting from index I is used as a work
3046 // list of tree nodes to visit.
3047 RegsToNegate.push_back(XorSrc);
3048 // Remember whether the comparisons are all integer or all floating point.
3049 bool IsInt = false;
3050 bool IsFP = false;
3051 for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3052 Register Reg = RegsToNegate[I];
3053 if (!MRI.hasOneNonDBGUse(Reg))
3054 return false;
3055 MachineInstr *Def = MRI.getVRegDef(Reg);
3056 switch (Def->getOpcode()) {
3057 default:
3058 // Don't match if the tree contains anything other than ANDs, ORs and
3059 // comparisons.
3060 return false;
3061 case TargetOpcode::G_ICMP:
3062 if (IsFP)
3063 return false;
3064 IsInt = true;
3065 // When we apply the combine we will invert the predicate.
3066 break;
3067 case TargetOpcode::G_FCMP:
3068 if (IsInt)
3069 return false;
3070 IsFP = true;
3071 // When we apply the combine we will invert the predicate.
3072 break;
3073 case TargetOpcode::G_AND:
3074 case TargetOpcode::G_OR:
3075 // Implement De Morgan's laws:
3076 // ~(x & y) -> ~x | ~y
3077 // ~(x | y) -> ~x & ~y
3078 // When we apply the combine we will change the opcode and recursively
3079 // negate the operands.
3080 RegsToNegate.push_back(Def->getOperand(1).getReg());
3081 RegsToNegate.push_back(Def->getOperand(2).getReg());
3082 break;
3083 }
3084 }
3085
3086 // Now we know whether the comparisons are integer or floating point, check
3087 // the constant in the xor.
3088 int64_t Cst;
3089 if (Ty.isVector()) {
3090 MachineInstr *CstDef = MRI.getVRegDef(CstReg);
3091 auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
3092 if (!MaybeCst)
3093 return false;
3094 if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
3095 return false;
3096 } else {
3097 if (!mi_match(CstReg, MRI, m_ICst(Cst)))
3098 return false;
3099 if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
3100 return false;
3101 }
3102
3103 return true;
3104}
3105
3106void CombinerHelper::applyNotCmp(MachineInstr &MI,
3107 SmallVectorImpl<Register> &RegsToNegate) {
3108 for (Register Reg : RegsToNegate) {
3109 MachineInstr *Def = MRI.getVRegDef(Reg);
3110 Observer.changingInstr(*Def);
3111 // For each comparison, invert the opcode. For each AND and OR, change the
3112 // opcode.
3113 switch (Def->getOpcode()) {
3114 default:
3115 llvm_unreachable("Unexpected opcode");
3116 case TargetOpcode::G_ICMP:
3117 case TargetOpcode::G_FCMP: {
3118 MachineOperand &PredOp = Def->getOperand(1);
3119 CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3120 (CmpInst::Predicate)PredOp.getPredicate());
3121 PredOp.setPredicate(NewP);
3122 break;
3123 }
3124 case TargetOpcode::G_AND:
3125 Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3126 break;
3127 case TargetOpcode::G_OR:
3128 Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3129 break;
3130 }
3131 Observer.changedInstr(*Def);
3132 }
3133
3134 replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3135 MI.eraseFromParent();
3136}
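// Illustrative sketch of the De Morgan rewrite above (hypothetical s1 values):
//   %a:_(s1) = G_ICMP intpred(eq), %x, %y
//   %b:_(s1) = G_ICMP intpred(slt), %x, %z
//   %c:_(s1) = G_AND %a, %b
//   %t:_(s1) = G_CONSTANT i1 true
//   %n:_(s1) = G_XOR %c, %t
// applyNotCmp inverts both predicates (eq -> ne, slt -> sge), flips the G_AND
// to a G_OR, and replaces %n with %c.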
3137
3138bool CombinerHelper::matchXorOfAndWithSameReg(
3139 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3140 // Match (xor (and x, y), y) (or any of its commuted cases)
3141 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3142 Register &X = MatchInfo.first;
3143 Register &Y = MatchInfo.second;
3144 Register AndReg = MI.getOperand(1).getReg();
3145 Register SharedReg = MI.getOperand(2).getReg();
3146
3147 // Find a G_AND on either side of the G_XOR.
3148 // Look for one of
3149 //
3150 // (xor (and x, y), SharedReg)
3151 // (xor SharedReg, (and x, y))
3152 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3153 std::swap(AndReg, SharedReg);
3154 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3155 return false;
3156 }
3157
3158 // Only do this if we'll eliminate the G_AND.
3159 if (!MRI.hasOneNonDBGUse(AndReg))
3160 return false;
3161
3162 // We can combine if SharedReg is the same as either the LHS or RHS of the
3163 // G_AND.
3164 if (Y != SharedReg)
3165 std::swap(X, Y);
3166 return Y == SharedReg;
3167}
3168
3169void CombinerHelper::applyXorOfAndWithSameReg(
3170 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3171 // Fold (xor (and x, y), y) -> (and (not x), y)
3172 Builder.setInstrAndDebugLoc(MI);
3173 Register X, Y;
3174 std::tie(X, Y) = MatchInfo;
3175 auto Not = Builder.buildNot(MRI.getType(X), X);
3176 Observer.changingInstr(MI);
3177 MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3178 MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3179 MI.getOperand(2).setReg(Y);
3180 Observer.changedInstr(MI);
3181}
3182
3183bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
3184 auto &PtrAdd = cast<GPtrAdd>(MI);
3185 Register DstReg = PtrAdd.getReg(0);
3186 LLT Ty = MRI.getType(DstReg);
3187 const DataLayout &DL = Builder.getMF().getDataLayout();
3188
3189 if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3190 return false;
3191
3192 if (Ty.isPointer()) {
3193 auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
3194 return ConstVal && *ConstVal == 0;
3195 }
3196
3197 assert(Ty.isVector() && "Expecting a vector type");
3198 const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
3199 return isBuildVectorAllZeros(*VecMI, MRI);
3200}
3201
3202void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
3203 auto &PtrAdd = cast<GPtrAdd>(MI);
3204 Builder.setInstrAndDebugLoc(PtrAdd);
3205 Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
3206 PtrAdd.eraseFromParent();
3207}
3208
3209/// The second source operand is known to be a power of 2.
3210void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
3211 Register DstReg = MI.getOperand(0).getReg();
3212 Register Src0 = MI.getOperand(1).getReg();
3213 Register Pow2Src1 = MI.getOperand(2).getReg();
3214 LLT Ty = MRI.getType(DstReg);
3215 Builder.setInstrAndDebugLoc(MI);
3216
3217 // Fold (urem x, pow2) -> (and x, pow2-1)
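// For instance, (urem x, 8) -> (and x, 7): 7 == 0b111 keeps exactly the low
// three bits of x, which is x modulo 8.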
3218 auto NegOne = Builder.buildConstant(Ty, -1);
3219 auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3220 Builder.buildAnd(DstReg, Src0, Add);
3221 MI.eraseFromParent();
3222}
3223
3224bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
3225 unsigned &SelectOpNo) {
3226 Register LHS = MI.getOperand(1).getReg();
3227 Register RHS = MI.getOperand(2).getReg();
3228
3229 Register OtherOperandReg = RHS;
3230 SelectOpNo = 1;
3231 MachineInstr *Select = MRI.getVRegDef(LHS);
3232
3233 // Don't do this unless the old select is going away. We want to eliminate the
3234 // binary operator, not replace a binop with a select.
3235 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3236 !MRI.hasOneNonDBGUse(LHS)) {
3237 OtherOperandReg = LHS;
3238 SelectOpNo = 2;
3239 Select = MRI.getVRegDef(RHS);
3240 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3241 !MRI.hasOneNonDBGUse(RHS))
3242 return false;
3243 }
3244
3245 MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
3246 MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());
3247
3248 if (!isConstantOrConstantVector(*SelectLHS, MRI,
3249 /*AllowFP*/ true,
3250 /*AllowOpaqueConstants*/ false))
3251 return false;
3252 if (!isConstantOrConstantVector(*SelectRHS, MRI,
3253 /*AllowFP*/ true,
3254 /*AllowOpaqueConstants*/ false))
3255 return false;
3256
3257 unsigned BinOpcode = MI.getOpcode();
3258
3259 // We now know one of the operands is a select of constants. Now verify that
3260 // the other binary operator operand is either a constant, or we can handle a
3261 // variable.
3262 bool CanFoldNonConst =
3263 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3264 (isNullOrNullSplat(*SelectLHS, MRI) ||
3265 isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
3266 (isNullOrNullSplat(*SelectRHS, MRI) ||
3267 isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
3268 if (CanFoldNonConst)
3269 return true;
3270
3271 return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
3272 /*AllowFP*/ true,
3273 /*AllowOpaqueConstants*/ false);
3274}
3275
3276/// \p SelectOperand is the operand in binary operator \p MI that is the select
3277/// to fold.
3278bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
3279 const unsigned &SelectOperand) {
3280 Builder.setInstrAndDebugLoc(MI);
3281
3282 Register Dst = MI.getOperand(0).getReg();
3283 Register LHS = MI.getOperand(1).getReg();
3284 Register RHS = MI.getOperand(2).getReg();
3285 MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());
3286
3287 Register SelectCond = Select->getOperand(1).getReg();
3288 Register SelectTrue = Select->getOperand(2).getReg();
3289 Register SelectFalse = Select->getOperand(3).getReg();
3290
3291 LLT Ty = MRI.getType(Dst);
3292 unsigned BinOpcode = MI.getOpcode();
3293
3294 Register FoldTrue, FoldFalse;
3295
3296 // We have a select-of-constants followed by a binary operator with a
3297 // constant. Eliminate the binop by pulling the constant math into the select.
3298 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
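// Concretely: add (select Cond, 3, 7), 5 --> select Cond, 8, 12, so the G_ADD
// disappears and only a select of new constants remains.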
3299 if (SelectOperand == 1) {
3300 // TODO: SelectionDAG verifies this actually constant folds before
3301 // committing to the combine.
3302
3303 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
3304 FoldFalse =
3305 Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
3306 } else {
3307 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
3308 FoldFalse =
3309 Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
3310 }
3311
3312 Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
3313 MI.eraseFromParent();
3314
3315 return true;
3316}
3317
3318std::optional<SmallVector<Register, 8>>
3319CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3320 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3321 // We want to detect if Root is part of a tree which represents a bunch
3322 // of loads being merged into a larger load. We'll try to recognize patterns
3323 // like, for example:
3324 //
3325 // Reg Reg
3326 // \ /
3327 // OR_1 Reg
3328 // \ /
3329 // OR_2
3330 // \ Reg
3331 // .. /
3332 // Root
3333 //
3334 // Reg Reg Reg Reg
3335 // \ / \ /
3336 // OR_1 OR_2
3337 // \ /
3338 // \ /
3339 // ...
3340 // Root
3341 //
3342 // Each "Reg" may have been produced by a load + some arithmetic. This
3343 // function will save each of them.
3344 SmallVector<Register, 8> RegsToVisit;
3345 SmallVector<const MachineInstr *, 7> Ors = {Root};
3346
3347 // In the "worst" case, we're dealing with a load for each byte. So, there
3348 // are at most #bytes - 1 ORs.
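// For instance, ORing four byte-sized loads into an s32 takes at most 3 ORs.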
3349 const unsigned MaxIter =
3350 MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3351 for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3352 if (Ors.empty())
3353 break;
3354 const MachineInstr *Curr = Ors.pop_back_val();
3355 Register OrLHS = Curr->getOperand(1).getReg();
3356 Register OrRHS = Curr->getOperand(2).getReg();
3357
3358 // In the combine, we want to eliminate the entire tree.
3359 if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3360 return std::nullopt;
3361
3362 // If it's a G_OR, save it and continue to walk. If it's not, then it's
3363 // something that may be a load + arithmetic.
3364 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3365 Ors.push_back(Or);
3366 else
3367 RegsToVisit.push_back(OrLHS);
3368 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3369 Ors.push_back(Or);
3370 else
3371 RegsToVisit.push_back(OrRHS);
3372 }
3373
3374 // We're going to try and merge each register into a wider power-of-2 type,
3375 // so we ought to have an even number of registers.
3376 if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3377 return std::nullopt;
3378 return RegsToVisit;
3379}
3380
3381/// Helper function for findLoadOffsetsForLoadOrCombine.
3382///
3383/// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3384/// and then moving that value into a specific byte offset.
3385///
3386/// e.g. x[i] << 24
3387///
3388/// \returns The load instruction and the byte offset it is moved into.
3389static std::optional<std::pair<GZExtLoad *, int64_t>>
3390matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3391 const MachineRegisterInfo &MRI) {
3392 assert(MRI.hasOneNonDBGUse(Reg) &&
3393 "Expected Reg to only have one non-debug use?");
3394 Register MaybeLoad;
3395 int64_t Shift;
3396 if (!mi_match(Reg, MRI,
3397 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3398 Shift = 0;
3399 MaybeLoad = Reg;
3400 }
3401
3402 if (Shift % MemSizeInBits != 0)
3403 return std::nullopt;
3404
3405 // TODO: Handle other types of loads.
3406 auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3407 if (!Load)
3408 return std::nullopt;
3409
3410 if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3411 return std::nullopt;
3412
3413 return std::make_pair(Load, Shift / MemSizeInBits);
3414}
3415
3416std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3417CombinerHelper::findLoadOffsetsForLoadOrCombine(
3418 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3419 const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3420
3421 // Each load found for the pattern. There should be one for each RegsToVisit.
3422 SmallSetVector<const MachineInstr *, 8> Loads;
3423
3424 // The lowest index used in any load. (The lowest "i" for each x[i].)
3425 int64_t LowestIdx = INT64_MAX;
3426
3427 // The load which uses the lowest index.
3428 GZExtLoad *LowestIdxLoad = nullptr;
3429
3430 // Keeps track of the load indices we see. We shouldn't see any indices twice.
3431 SmallSet<int64_t, 8> SeenIdx;
3432
3433 // Ensure each load is in the same MBB.
3434 // TODO: Support multiple MachineBasicBlocks.
3435 MachineBasicBlock *MBB = nullptr;
3436 const MachineMemOperand *MMO = nullptr;
3437
3438 // Earliest instruction-order load in the pattern.
3439 GZExtLoad *EarliestLoad = nullptr;
3440
3441 // Latest instruction-order load in the pattern.
3442 GZExtLoad *LatestLoad = nullptr;
3443
3444 // Base pointer which every load should share.
3445 Register BasePtr;
3446
3447 // We want to find a load for each register. Each load should have some
3448 // appropriate bit twiddling arithmetic. During this loop, we will also keep
3449 // track of the load which uses the lowest index. Later, we will check if we
3450 // can use its pointer in the final, combined load.
3451 for (auto Reg : RegsToVisit) {
3452 // Find the load, and find the position that it will end up at in the
3453 // final (possibly shifted) value.
3454 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3455 if (!LoadAndPos)
3456 return std::nullopt;
3457 GZExtLoad *Load;
3458 int64_t DstPos;
3459 std::tie(Load, DstPos) = *LoadAndPos;
3460
3461 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3462 // it is difficult to check for stores/calls/etc between loads.
3463 MachineBasicBlock *LoadMBB = Load->getParent();
3464 if (!MBB)
3465 MBB = LoadMBB;
3466 if (LoadMBB != MBB)
3467 return std::nullopt;
3468
3469 // Make sure that the MachineMemOperands of every seen load are compatible.
3470 auto &LoadMMO = Load->getMMO();
3471 if (!MMO)
3472 MMO = &LoadMMO;
3473 if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3474 return std::nullopt;
3475
3476 // Find out what the base pointer and index for the load is.
3477 Register LoadPtr;
3478 int64_t Idx;
3479 if (!mi_match(Load->getOperand(1).getReg(), MRI,
3480 m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3481 LoadPtr = Load->getOperand(1).getReg();
3482 Idx = 0;
3483 }
3484
3485 // Don't combine things like a[i], a[i] -> a bigger load.
3486 if (!SeenIdx.insert(Idx).second)
3487 return std::nullopt;
3488
3489 // Every load must share the same base pointer; don't combine things like:
3490 //
3491 // a[i], b[i + 1] -> a bigger load.
3492 if (!BasePtr.isValid())
3493 BasePtr = LoadPtr;
3494 if (BasePtr != LoadPtr)
3495 return std::nullopt;
3496
3497 if (Idx < LowestIdx) {
3498 LowestIdx = Idx;
3499 LowestIdxLoad = Load;
3500 }
3501
3502 // Keep track of the byte offset that this load ends up at. If we have seen
3503 // the byte offset, then stop here. We do not want to combine:
3504 //
3505 // a[i] << 16, a[i + k] << 16 -> a bigger load.
3506 if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3507 return std::nullopt;
3508 Loads.insert(Load);
3509
3510 // Keep track of the position of the earliest/latest loads in the pattern.
3511 // We will check that there are no load fold barriers between them later
3512 // on.
3513 //
3514 // FIXME: Is there a better way to check for load fold barriers?
3515 if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3516 EarliestLoad = Load;
3517 if (!LatestLoad || dominates(*LatestLoad, *Load))
3518 LatestLoad = Load;
3519 }
3520
3521 // We found a load for each register. Let's check if each load satisfies the
3522 // pattern.
3523 assert(Loads.size() == RegsToVisit.size() &&
3524 "Expected to find a load for each register?");
3525 assert(EarliestLoad != LatestLoad && EarliestLoad &&
3526 LatestLoad && "Expected at least two loads?");
3527
3528 // Check if there are any stores, calls, etc. between any of the loads. If
3529 // there are, then we can't safely perform the combine.
3530 //
3531 // MaxIter is chosen based off the (worst case) number of iterations it
3532 // typically takes to succeed in the LLVM test suite plus some padding.
3533 //
3534 // FIXME: Is there a better way to check for load fold barriers?
3535 const unsigned MaxIter = 20;
3536 unsigned Iter = 0;
3537 for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3538 LatestLoad->getIterator())) {
3539 if (Loads.count(&MI))
3540 continue;
3541 if (MI.isLoadFoldBarrier())
3542 return std::nullopt;
3543 if (Iter++ == MaxIter)
3544 return std::nullopt;
3545 }
3546
3547 return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3548}
3549
3550bool CombinerHelper::matchLoadOrCombine(
3551 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3552 assert(MI.getOpcode() == TargetOpcode::G_OR);
3553 MachineFunction &MF = *MI.getMF();
3554 // Assuming a little-endian target, transform:
3555 // s8 *a = ...
3556 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3557 // =>
3558 // s32 val = *((s32)a)
3559 //
3560 // s8 *a = ...
3561 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3562 // =>
3563 // s32 val = BSWAP(*((s32)a))
3564 Register Dst = MI.getOperand(0).getReg();
3565 LLT Ty = MRI.getType(Dst);
3566 if (Ty.isVector())
3567 return false;
3568
3569 // We need to combine at least two loads into this type. Since the smallest
3570 // possible load is into a byte, we need at least a 16-bit wide type.
3571 const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3572 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3573 return false;
3574
3575 // Match a collection of non-OR instructions in the pattern.
3576 auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3577 if (!RegsToVisit)
3578 return false;
3579
3580 // We have a collection of non-OR instructions. Figure out how wide each of
3581 // the small loads should be based off of the number of potential loads we
3582 // found.
3583 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3584 if (NarrowMemSizeInBits % 8 != 0)
3585 return false;
3586
3587 // Check if each register feeding into each OR is a load from the same
3588 // base pointer + some arithmetic.
3589 //
3590 // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3591 //
3592 // Also verify that each of these ends up putting a[i] into the same memory
3593 // offset as a load into a wide type would.
3594 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3595 GZExtLoad *LowestIdxLoad, *LatestLoad;
3596 int64_t LowestIdx;
3597 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3598 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3599 if (!MaybeLoadInfo)
3600 return false;
3601 std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3602
3603 // We have a bunch of loads being OR'd together. Using the addresses + offsets
3604 // we found before, check if this corresponds to a big or little endian byte
3605 // pattern. If it does, then we can represent it using a load + possibly a
3606 // BSWAP.
3607 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3608 std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3609 if (!IsBigEndian)
3610 return false;
3611 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3612 if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3613 return false;
3614
3615 // Make sure that the load from the lowest index produces offset 0 in the
3616 // final value.
3617 //
3618 // This ensures that we won't combine something like this:
3619 //
3620 // load x[i] -> byte 2
3621 // load x[i+1] -> byte 0 ---> wide_load x[i]
3622 // load x[i+2] -> byte 1
3623 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3624 const unsigned ZeroByteOffset =
3625 *IsBigEndian
3626 ? bigEndianByteAt(NumLoadsInTy, 0)
3627 : littleEndianByteAt(NumLoadsInTy, 0);
3628 auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3629 if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3630 ZeroOffsetIdx->second != LowestIdx)
3631 return false;
3632
3633 // We will reuse the pointer from the load which ends up at byte offset 0. It
3634 // may not use index 0.
3635 Register Ptr = LowestIdxLoad->getPointerReg();
3636 const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3637 LegalityQuery::MemDesc MMDesc(MMO);
3638 MMDesc.MemoryTy = Ty;
3639 if (!isLegalOrBeforeLegalizer(
3640 {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3641 return false;
3642 auto PtrInfo = MMO.getPointerInfo();
3643 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3644
3645 // Load must be allowed and fast on the target.
3646 LLVMContext &C = MF.getFunction().getContext();
3647 auto &DL = MF.getDataLayout();
3648 unsigned Fast = 0;
3649 if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3650 !Fast)
3651 return false;
3652
3653 MatchInfo = [=](MachineIRBuilder &MIB) {
3654 MIB.setInstrAndDebugLoc(*LatestLoad);
3655 Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3656 MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3657 if (NeedsBSwap)
3658 MIB.buildBSwap(Dst, LoadDst);
3659 };
3660 return true;
3661}
3662
3663bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3664 MachineInstr *&ExtMI) {
3665 assert(MI.getOpcode() == TargetOpcode::G_PHI);
3666
3667 Register DstReg = MI.getOperand(0).getReg();
3668
3669 // TODO: Extending a vector may be expensive; don't do this until the
3670 // heuristics are better.
3671 if (MRI.getType(DstReg).isVector())
3672 return false;
3673
3674 // Try to match a phi, whose only use is an extend.
3675 if (!MRI.hasOneNonDBGUse(DstReg))
3676 return false;
3677 ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3678 switch (ExtMI->getOpcode()) {
3679 case TargetOpcode::G_ANYEXT:
3680 return true; // G_ANYEXT is usually free.
3681 case TargetOpcode::G_ZEXT:
3682 case TargetOpcode::G_SEXT:
3683 break;
3684 default:
3685 return false;
3686 }
3687
3688 // If the target is likely to fold this extend away, don't propagate.
3689 if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3690 return false;
3691
3692 // We don't want to propagate the extends unless there's a good chance that
3693 // they'll be optimized in some way.
3694 // Collect the unique incoming values.
3695 SmallPtrSet<MachineInstr *, 4> InSrcs;
3696 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3697 auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3698 switch (DefMI->getOpcode()) {
3699 case TargetOpcode::G_LOAD:
3700 case TargetOpcode::G_TRUNC:
3701 case TargetOpcode::G_SEXT:
3702 case TargetOpcode::G_ZEXT:
3703 case TargetOpcode::G_ANYEXT:
3704 case TargetOpcode::G_CONSTANT:
3705 InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3706 // Don't try to propagate if there are too many places to create new
3707 // extends; chances are it'll increase code size.
3708 if (InSrcs.size() > 2)
3709 return false;
3710 break;
3711 default:
3712 return false;
3713 }
3714 }
3715 return true;
3716}
3717
3718void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3719 MachineInstr *&ExtMI) {
3720 assert(MI.getOpcode() == TargetOpcode::G_PHI);
3721 Register DstReg = ExtMI->getOperand(0).getReg();
3722 LLT ExtTy = MRI.getType(DstReg);
3723
3724 // Propagate the extension into each incoming register's defining block.
3725 // Use a SetVector here because PHIs can have duplicate edges, and we want
3726 // deterministic iteration order.
3727 SmallSetVector<MachineInstr *, 8> SrcMIs;
3728 SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3729 for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3730 auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3731 if (!SrcMIs.insert(SrcMI))
3732 continue;
3733
3734 // Build an extend after each src inst.
3735 auto *MBB = SrcMI->getParent();
3736 MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3737 if (InsertPt != MBB->end() && InsertPt->isPHI())
3738 InsertPt = MBB->getFirstNonPHI();
3739
3740 Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3741 Builder.setDebugLoc(MI.getDebugLoc());
3742 auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3743 SrcMI->getOperand(0).getReg());
3744 OldToNewSrcMap[SrcMI] = NewExt;
3745 }
3746
3747 // Create a new phi with the extended inputs.
3748 Builder.setInstrAndDebugLoc(MI);
3749 auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3750 NewPhi.addDef(DstReg);
3751 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
3752 if (!MO.isReg()) {
3753 NewPhi.addMBB(MO.getMBB());
3754 continue;
3755 }
3756 auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3757 NewPhi.addUse(NewSrc->getOperand(0).getReg());
3758 }
3759 Builder.insertInstr(NewPhi);
3760 ExtMI->eraseFromParent();
3761}
3762
3763bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3764 Register &Reg) {
3765 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3766 // If we have a constant index, look for a G_BUILD_VECTOR source
3767 // and find the source register that the index maps to.
3768 Register SrcVec = MI.getOperand(1).getReg();
3769 LLT SrcTy = MRI.getType(SrcVec);
3770
3771 auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3772 if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3773 return false;
3774
3775 unsigned VecIdx = Cst->Value.getZExtValue();
3776
3777 // Check if we have a build_vector or build_vector_trunc with an optional
3778 // trunc in front.
3779 MachineInstr *SrcVecMI = MRI.getVRegDef(SrcVec);
3780 if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
3781 SrcVecMI = MRI.getVRegDef(SrcVecMI->getOperand(1).getReg());
3782 }
3783
3784 if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
3785 SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
3786 return false;
3787
3788 EVT Ty(getMVTForLLT(SrcTy));
3789 if (!MRI.hasOneNonDBGUse(SrcVec) &&
3790 !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
3791 return false;
3792
3793 Reg = SrcVecMI->getOperand(VecIdx + 1).getReg();
3794 return true;
3795}
3796
3797void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
3798 Register &Reg) {
3799 // Check the type of the register, since it may have come from a
3800 // G_BUILD_VECTOR_TRUNC.
3801 LLT ScalarTy = MRI.getType(Reg);
3802 Register DstReg = MI.getOperand(0).getReg();
3803 LLT DstTy = MRI.getType(DstReg);
3804
3805 Builder.setInstrAndDebugLoc(MI);
3806 if (ScalarTy != DstTy) {
3807 assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
3808 Builder.buildTrunc(DstReg, Reg);
3809 MI.eraseFromParent();
3810 return;
3811 }
3812 replaceSingleDefInstWithReg(MI, Reg);
3813}
3814
3815bool CombinerHelper::matchExtractAllEltsFromBuildVector(
3816 MachineInstr &MI,
3817 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3818 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3819 // This combine tries to find build_vectors which have every source element
3820 // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
3821 // masked load scalarization are run late in the pipeline. There's already
3822 // a combine for a similar pattern starting from the extract, but that
3823 // doesn't attempt to do it if there are multiple uses of the build_vector,
3824 // which in this case is true. Starting the combine from the build_vector
3825 // feels more natural than trying to find sibling nodes of extracts.
3826 // E.g.
3827 // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
3828 // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
3829 // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
3830 // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
3831 // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
3832 // ==>
3833 // replace ext{1,2,3,4} with %s{1,2,3,4}
3834
3835 Register DstReg = MI.getOperand(0).getReg();
3836 LLT DstTy = MRI.getType(DstReg);
3837 unsigned NumElts = DstTy.getNumElements();
3838
3839 SmallBitVector ExtractedElts(NumElts);
3840 for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
3841 if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
3842 return false;
3843 auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
3844 if (!Cst)
3845 return false;
3846 unsigned Idx = Cst->getZExtValue();
3847 if (Idx >= NumElts)
3848 return false; // Out of range.
3849 ExtractedElts.set(Idx);
3850 SrcDstPairs.emplace_back(
3851 std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
3852 }
3853 // Match if every element was extracted.
3854 return ExtractedElts.all();
3855}
3856
3857void CombinerHelper::applyExtractAllEltsFromBuildVector(
3858 MachineInstr &MI,
3859 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3860 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3861 for (auto &Pair : SrcDstPairs) {
3862 auto *ExtMI = Pair.second;
3863 replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
3864 ExtMI->eraseFromParent();
3865 }
3866 MI.eraseFromParent();
3867}
3868
3869void CombinerHelper::applyBuildFn(
3870 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3871 Builder.setInstrAndDebugLoc(MI);
3872 MatchInfo(Builder);
3873 MI.eraseFromParent();
3874}
3875
3876void CombinerHelper::applyBuildFnNoErase(
3877 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3878 Builder.setInstrAndDebugLoc(MI);
3879 MatchInfo(Builder);
3880}
3881
3882bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
3883 BuildFnTy &MatchInfo) {
3884 assert(MI.getOpcode() == TargetOpcode::G_OR);
3885
3886 Register Dst = MI.getOperand(0).getReg();
3887 LLT Ty = MRI.getType(Dst);
3888 unsigned BitWidth = Ty.getScalarSizeInBits();
3889
3890 Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
3891 unsigned FshOpc = 0;
3892
3893 // Match (or (shl ...), (lshr ...)).
3894 if (!mi_match(Dst, MRI,
3895 // m_GOr() handles the commuted version as well.
3896 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
3897 m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
3898 return false;
3899
3900 // Given constants C0 and C1 such that C0 + C1 is bit-width:
3901 // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
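// For instance, on a 32-bit type, (or (shl x, 8), (lshr y, 24)) can become
// (fshl x, y, 8) or, equivalently, (fshr x, y, 24), since 8 + 24 == 32.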
3902 int64_t CstShlAmt, CstLShrAmt;
3903 if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
3904 mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
3905 CstShlAmt + CstLShrAmt == BitWidth) {
3906 FshOpc = TargetOpcode::G_FSHR;
3907 Amt = LShrAmt;
3908
3909 } else if (mi_match(LShrAmt, MRI,
3910 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
3911 ShlAmt == Amt) {
3912 // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
3913 FshOpc = TargetOpcode::G_FSHL;
3914
3915 } else if (mi_match(ShlAmt, MRI,
3916 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
3917 LShrAmt == Amt) {
3918 // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
3919 FshOpc = TargetOpcode::G_FSHR;
3920
3921 } else {
3922 return false;
3923 }
3924
3925 LLT AmtTy = MRI.getType(Amt);
3926 if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
3927 return false;
3928
3929 MatchInfo = [=](MachineIRBuilder &B) {
3930 B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
3931 };
3932 return true;
3933}
3934
3935/// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
3936bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
3937 unsigned Opc = MI.getOpcode();
3938 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3939 Register X = MI.getOperand(1).getReg();
3940 Register Y = MI.getOperand(2).getReg();
3941 if (X != Y)
3942 return false;
3943 unsigned RotateOpc =
3944 Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
3945 return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
3946}
3947
3948void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
3949 unsigned Opc = MI.getOpcode();
3950 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3951 bool IsFSHL = Opc == TargetOpcode::G_FSHL;
3952 Observer.changingInstr(MI);
3953 MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
3954 : TargetOpcode::G_ROTR));
3955 MI.removeOperand(2);
3956 Observer.changedInstr(MI);
3957}
3958
3959// Fold (rot x, c) -> (rot x, c % BitSize)
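// For instance, rotating a 32-bit value by 37 is the same as rotating it by
// 37 % 32 == 5, so (rotl x:s32, 37) becomes (rotl x:s32, 5).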
3960bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
3961 assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
3962 MI.getOpcode() == TargetOpcode::G_ROTR);
3963 unsigned Bitsize =
3964 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
3965 Register AmtReg = MI.getOperand(2).getReg();
3966 bool OutOfRange = false;
3967 auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
3968 if (auto *CI = dyn_cast<ConstantInt>(C))
3969 OutOfRange |= CI->getValue().uge(Bitsize);
3970 return true;
3971 };
3972 return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
3973}
3974
3975void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
3976 assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
3977 MI.getOpcode() == TargetOpcode::G_ROTR);
3978 unsigned Bitsize =
3979 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
3980 Builder.setInstrAndDebugLoc(MI);
3981 Register Amt = MI.getOperand(2).getReg();
3982 LLT AmtTy = MRI.getType(Amt);
3983 auto Bits = Builder.buildConstant(AmtTy, Bitsize);
3984 Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
3985 Observer.changingInstr(MI);
3986 MI.getOperand(2).setReg(Amt);
3987 Observer.changedInstr(MI);
3988}
3989
3990bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
3991 int64_t &MatchInfo) {
3992 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
3993 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
3994 auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
3995 auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
3996 std::optional<bool> KnownVal;
3997 switch (Pred) {
3998 default:
3999 llvm_unreachable("Unexpected G_ICMP predicate?");
4000 case CmpInst::ICMP_EQ:
4001 KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
4002 break;
4003 case CmpInst::ICMP_NE:
4004 KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
4005 break;
4006 case CmpInst::ICMP_SGE:
4007 KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
4008 break;
4009 case CmpInst::ICMP_SGT:
4010 KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
4011 break;
4012 case CmpInst::ICMP_SLE:
4013 KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
4014 break;
4015 case CmpInst::ICMP_SLT:
4016 KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
4017 break;
4018 case CmpInst::ICMP_UGE:
4019 KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
4020 break;
4021 case CmpInst::ICMP_UGT:
4022 KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
4023 break;
4024 case CmpInst::ICMP_ULE:
4025 KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
4026 break;
4027 case CmpInst::ICMP_ULT:
4028 KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
4029 break;
4030 }
4031 if (!KnownVal)
4032 return false;
4033 MatchInfo =
4034 *KnownVal
4035 ? getICmpTrueVal(getTargetLowering(),
4036 /*IsVector = */
4037 MRI.getType(MI.getOperand(0).getReg()).isVector(),
4038 /* IsFP = */ false)
4039 : 0;
4040 return true;
4041}
4042
4043bool CombinerHelper::matchICmpToLHSKnownBits(
4044 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4045 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4046 // Given:
4047 //
4048 // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4049 // %cmp = G_ICMP ne %x, 0
4050 //
4051 // Or:
4052 //
4053 // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4054 // %cmp = G_ICMP eq %x, 1
4055 //
4056 // We can replace %cmp with %x assuming true is 1 on the target.
4057 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4058 if (!CmpInst::isEquality(Pred))
4059 return false;
4060 Register Dst = MI.getOperand(0).getReg();
4061 LLT DstTy = MRI.getType(Dst);
4062 if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
4063 /* IsFP = */ false) != 1)
4064 return false;
4065 int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
4066 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
4067 return false;
4068 Register LHS = MI.getOperand(2).getReg();
4069 auto KnownLHS = KB->getKnownBits(LHS);
4070 if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
4071 return false;
4072 // Make sure replacing Dst with the LHS is a legal operation.
4073 LLT LHSTy = MRI.getType(LHS);
4074 unsigned LHSSize = LHSTy.getSizeInBits();
4075 unsigned DstSize = DstTy.getSizeInBits();
4076 unsigned Op = TargetOpcode::COPY;
4077 if (DstSize != LHSSize)
4078 Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
4079 if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
4080 return false;
4081 MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
4082 return true;
4083}
4084
4085// Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
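// For instance, (and (or x, 0xF0), 0x0F) -> (and x, 0x0F): every bit the OR
// could set (0xF0) is cleared again by the AND mask, so the OR is dead.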
4086bool CombinerHelper::matchAndOrDisjointMask(
4087 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4088 assert(MI.getOpcode() == TargetOpcode::G_AND);
4089
4090 // Ignore vector types to simplify matching the two constants.
4091 // TODO: do this for vectors and scalars via a demanded bits analysis.
4092 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4093 if (Ty.isVector())
4094 return false;
4095
4096 Register Src;
4097 Register AndMaskReg;
4098 int64_t AndMaskBits;
4099 int64_t OrMaskBits;
4100 if (!mi_match(MI, MRI,
4101 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
4102 m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
4103 return false;
4104
4105 // Check if OrMask could turn on any bits in Src.
4106 if (AndMaskBits & OrMaskBits)
4107 return false;
4108
4109 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4110 Observer.changingInstr(MI);
4111 // Canonicalize the result to have the constant on the RHS.
4112 if (MI.getOperand(1).getReg() == AndMaskReg)
4113 MI.getOperand(2).setReg(AndMaskReg);
4114 MI.getOperand(1).setReg(Src);
4115 Observer.changedInstr(MI);
4116 };
4117 return true;
4118}
4119
4120/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
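/// For instance, (sext_inreg (lshr x:s32, 8), 16) can become (sbfx x, 8, 16):
/// sign-extend the 16-bit field that starts at bit 8 of x.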
4121bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
4122 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4123 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4124 Register Dst = MI.getOperand(0).getReg();
4125 Register Src = MI.getOperand(1).getReg();
4126 LLT Ty = MRI.getType(Src);
4127 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4128 if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
4129 return false;
4130 int64_t Width = MI.getOperand(2).getImm();
4131 Register ShiftSrc;
4132 int64_t ShiftImm;
4133 if (!mi_match(
4134 Src, MRI,
4135 m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
4136 m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
4137 return false;
4138 if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
4139 return false;
4140
4141 MatchInfo = [=](MachineIRBuilder &B) {
4142 auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4143 auto Cst2 = B.buildConstant(ExtractTy, Width);
4144 B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4145 };
4146 return true;
4147}
4148
4149/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
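/// For instance, (and (lshr x:s32, 4), 0xFF) can become (ubfx x, 4, 8): extract
/// the 8-bit field that starts at bit 4 of x, zero-extending the result.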
4150bool CombinerHelper::matchBitfieldExtractFromAnd(
4151 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4152 assert(MI.getOpcode() == TargetOpcode::G_AND);
4153 Register Dst = MI.getOperand(0).getReg();
4154 LLT Ty = MRI.getType(Dst);
4155 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4156 if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4157 TargetOpcode::G_UBFX, Ty, ExtractTy))
4158 return false;
4159
4160 int64_t AndImm, LSBImm;
4161 Register ShiftSrc;
4162 const unsigned Size = Ty.getScalarSizeInBits();
4163 if (!mi_match(MI.getOperand(0).getReg(), MRI,
4164 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
4165 m_ICst(AndImm))))
4166 return false;
4167
4168 // The mask is a mask of the low bits iff imm & (imm+1) == 0.
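// For instance, 0x0F is a low-bit mask (0x0F & 0x10 == 0), while 0x14 is not
// (0x14 & 0x15 != 0).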
4169 auto MaybeMask = static_cast<uint64_t>(AndImm);
4170 if (MaybeMask & (MaybeMask + 1))
4171 return false;
4172
4173 // LSB must fit within the register.
4174 if (static_cast<uint64_t>(LSBImm) >= Size)
4175 return false;
4176
4177 uint64_t Width = APInt(Size, AndImm).countr_one();
4178 MatchInfo = [=](MachineIRBuilder &B) {
4179 auto WidthCst = B.buildConstant(ExtractTy, Width);
4180 auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
4181 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4182 };
4183 return true;
4184}
4185
4186bool CombinerHelper::matchBitfieldExtractFromShr(
4187 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4188 const unsigned Opcode = MI.getOpcode();
4189 assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
4190
4191 const Register Dst = MI.getOperand(0).getReg();
4192
4193 const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
4194 ? TargetOpcode::G_SBFX
4195 : TargetOpcode::G_UBFX;
4196
4197 // Check if the type we would use for the extract is legal
4198 LLT Ty = MRI.getType(Dst);
4199 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4200 if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
4201 return false;
4202
4203 Register ShlSrc;
4204 int64_t ShrAmt;
4205 int64_t ShlAmt;
4206 const unsigned Size = Ty.getScalarSizeInBits();
4207
4208 // Try to match shr (shl x, c1), c2
4209 if (!mi_match(Dst, MRI,
4210 m_BinOp(Opcode,
4211 m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
4212 m_ICst(ShrAmt))))
4213 return false;
4214
4215 // Make sure that the shift sizes can fit a bitfield extract
4216 if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
4217 return false;
4218
4219 // Skip this combine if the G_SEXT_INREG combine could handle it
4220 if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
4221 return false;
4222
4223 // Calculate start position and width of the extract
4224 const int64_t Pos = ShrAmt - ShlAmt;
4225 const int64_t Width = Size - ShrAmt;
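// For instance, (lshr (shl x:s32, 4), 12) reads bits [8, 28) of x, so
// Pos = 12 - 4 = 8 and Width = 32 - 12 = 20, i.e. a G_UBFX of 20 bits at bit 8.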
4226
4227 MatchInfo = [=](MachineIRBuilder &B) {
4228 auto WidthCst = B.buildConstant(ExtractTy, Width);
4229 auto PosCst = B.buildConstant(ExtractTy, Pos);
4230 B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
4231 };
4232 return true;
4233}
4234
4235bool CombinerHelper::matchBitfieldExtractFromShrAnd(
4236 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4237 const unsigned Opcode = MI.getOpcode();
4238 assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
4239
4240 const Register Dst = MI.getOperand(0).getReg();
4241 LLT Ty = MRI.getType(Dst);
4242 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4243 if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4244 TargetOpcode::G_UBFX, Ty, ExtractTy))
4245 return false;
4246
4247 // Try to match shr (and x, c1), c2
4248 Register AndSrc;
4249 int64_t ShrAmt;
4250 int64_t SMask;
4251 if (!mi_match(Dst, MRI,
4252 m_BinOp(Opcode,
4253 m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))),
4254 m_ICst(ShrAmt))))
4255 return false;
4256
4257 const unsigned Size = Ty.getScalarSizeInBits();
4258 if (ShrAmt < 0 || ShrAmt >= Size)
4259 return false;
4260
4261 // If the shift subsumes the mask, emit the 0 directly.
4262 if (0 == (SMask >> ShrAmt)) {
4263 MatchInfo = [=](MachineIRBuilder &B) {
4264 B.buildConstant(Dst, 0);
4265 };
4266 return true;
4267 }
4268
4269 // Check that ubfx can do the extraction, with no holes in the mask.
4270 uint64_t UMask = SMask;
4271 UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
4272 UMask &= maskTrailingOnes<uint64_t>(Size);
4273 if (!isMask_64(UMask))
4274 return false;
4275
4276 // Calculate start position and width of the extract.
4277 const int64_t Pos = ShrAmt;
4278 const int64_t Width = llvm::countr_one(UMask) - ShrAmt;
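// For instance, (lshr (and x:s32, 0xFFF0), 4): UMask becomes 0xFFFF, so
// Pos = 4 and Width = 16 - 4 = 12, i.e. a G_UBFX of 12 bits starting at bit 4.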
4279
4280 // It's preferable to keep the shift, rather than form G_SBFX.
4281 // TODO: remove the G_AND via demanded bits analysis.
4282 if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
4283 return false;
4284
4285 MatchInfo = [=](MachineIRBuilder &B) {
4286 auto WidthCst = B.buildConstant(ExtractTy, Width);
4287 auto PosCst = B.buildConstant(ExtractTy, Pos);
4288 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
4289 };
4290 return true;
4291}
4292
4293bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4294 MachineInstr &PtrAdd) {
4295 assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD);
4296
4297 Register Src1Reg = PtrAdd.getOperand(1).getReg();
4298 MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI);
4299 if (!Src1Def)
4300 return false;
4301
4302 Register Src2Reg = PtrAdd.getOperand(2).getReg();
4303
4304 if (MRI.hasOneNonDBGUse(Src1Reg))
4305 return false;
4306
4307 auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
4308 if (!C1)
4309 return false;
4310 auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4311 if (!C2)
4312 return false;
4313
4314 const APInt &C1APIntVal = *C1;
4315 const APInt &C2APIntVal = *C2;
4316 const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
4317
4318 for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) {
4319 // This combine may end up running before ptrtoint/inttoptr combines
4320 // manage to eliminate redundant conversions, so try to look through them.
4321 MachineInstr *ConvUseMI = &UseMI;
4322 unsigned ConvUseOpc = ConvUseMI->getOpcode();
4323 while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
4324 ConvUseOpc == TargetOpcode::G_PTRTOINT) {
4325 Register DefReg = ConvUseMI->getOperand(0).getReg();
4326 if (!MRI.hasOneNonDBGUse(DefReg))
4327 break;
4328 ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
4329 ConvUseOpc = ConvUseMI->getOpcode();
4330 }
4331 auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
4332 ConvUseOpc == TargetOpcode::G_STORE;
4333 if (!LoadStore)
4334 continue;
4335 // Is x[offset2] already not a legal addressing mode? If so then
4336 // reassociating the constants breaks nothing (we test offset2 because
4337 // that's the one we hope to fold into the load or store).
4338 TargetLoweringBase::AddrMode AM;
4339 AM.HasBaseReg = true;
4340 AM.BaseOffs = C2APIntVal.getSExtValue();
4341 unsigned AS =
4342 MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace();
4343 Type *AccessTy =
4344 getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()),
4345 PtrAdd.getMF()->getFunction().getContext());
4346 const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
4347 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4348 AccessTy, AS))
4349 continue;
4350
4351 // Would x[offset1+offset2] still be a legal addressing mode?
4352 AM.BaseOffs = CombinedValue;
4353 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4354 AccessTy, AS))
4355 return true;
4356 }
4357
4358 return false;
4359}
4360
4361bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
4362 MachineInstr *RHS,
4363 BuildFnTy &MatchInfo) {
4364 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4365 Register Src1Reg = MI.getOperand(1).getReg();
4366 if (RHS->getOpcode() != TargetOpcode::G_ADD)
4367 return false;
4368 auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
4369 if (!C2)
4370 return false;
4371
4372 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4373 LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
4374
4375 auto NewBase =
4376 Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
4377 Observer.changingInstr(MI);
4378 MI.getOperand(1).setReg(NewBase.getReg(0));
4379 MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
4380 Observer.changedInstr(MI);
4381 };
4382 return !reassociationCanBreakAddressingModePattern(MI);
4383}
4384
4385bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
4386 MachineInstr *LHS,
4387 MachineInstr *RHS,
4388 BuildFnTy &MatchInfo) {
4389 // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
4390 // if and only if (G_PTR_ADD X, C) has one use.
4391 Register LHSBase;
4392 std::optional<ValueAndVReg> LHSCstOff;
4393 if (!mi_match(MI.getBaseReg(), MRI,
4394 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
4395 return false;
4396
4397 auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
4398 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4399 // When we change LHSPtrAdd's offset register we might cause it to use a reg
4400 // before its def. Sink the instruction down to just before the outer PTR_ADD
4401 // to ensure this doesn't happen.
4402 LHSPtrAdd->moveBefore(&MI);
4403 Register RHSReg = MI.getOffsetReg();
4404 // Rebuild the constant: setting the vreg directly can cause a type mismatch if it comes from an extend/trunc.
4405 auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value);
4406 Observer.changingInstr(MI);
4407 MI.getOperand(2).setReg(NewCst.getReg(0));
4408 Observer.changedInstr(MI);
4409 Observer.changingInstr(*LHSPtrAdd);
4410 LHSPtrAdd->getOperand(2).setReg(RHSReg);
4411 Observer.changedInstr(*LHSPtrAdd);
4412 };
4413 return !reassociationCanBreakAddressingModePattern(MI);
4414}
4415
4416bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
4417 MachineInstr *LHS,
4418 MachineInstr *RHS,
4419 BuildFnTy &MatchInfo) {
4420 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4421 auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
4422 if (!LHSPtrAdd)
4423 return false;
4424
4425 Register Src2Reg = MI.getOperand(2).getReg();
4426 Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4427 Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4428 auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
4429 if (!C1)
4430 return false;
4431 auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4432 if (!C2)
4433 return false;
4434
4435 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4436 auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
4437 Observer.changingInstr(MI);
4438 MI.getOperand(1).setReg(LHSSrc1);
4439 MI.getOperand(2).setReg(NewCst.getReg(0));
4440 Observer.changedInstr(MI);
4441 };
4442 return !reassociationCanBreakAddressingModePattern(MI);
4443}
4444
4445bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
4446 BuildFnTy &MatchInfo) {
4447 auto &PtrAdd = cast<GPtrAdd>(MI);
4448 // We're trying to match a few pointer computation patterns here for
4449 // re-association opportunities.
4450 // 1) Isolating a constant operand to be on the RHS, e.g.:
4451 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4452 //
4453 // 2) Folding two constants in each sub-tree as long as such folding
4454 // doesn't break a legal addressing mode.
4455 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4456 //
4457 // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
4458 // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
4459 // iff (G_PTR_ADD X, C) has one use.
4460 MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
4461 MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());
4462
4463 // Try to match example 2.
4464 if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
4465 return true;
4466
4467 // Try to match example 3.
4468 if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
4469 return true;
4470
4471 // Try to match example 1.
4472 if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
4473 return true;
4474
4475 return false;
4476}
4477
4478bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
4479 Register Op1 = MI.getOperand(1).getReg();
4480 Register Op2 = MI.getOperand(2).getReg();
4481 auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
4482 if (!MaybeCst)
4483 return false;
4484 MatchInfo = *MaybeCst;
4485 return true;
4486}
4487
4488bool CombinerHelper::matchNarrowBinopFeedingAnd(
4489 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4490 // Look for a binop feeding into an AND with a mask:
4491 //
4492 // %add = G_ADD %lhs, %rhs
4493 // %and = G_AND %add, 000...11111111
4494 //
4495 // Check if it's possible to perform the binop at a narrower width and zext
4496 // back to the original width like so:
4497 //
4498 // %narrow_lhs = G_TRUNC %lhs
4499 // %narrow_rhs = G_TRUNC %rhs
4500 // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
4501 // %new_add = G_ZEXT %narrow_add
4502 // %and = G_AND %new_add, 000...11111111
4503 //
4504 // This can allow later combines to eliminate the G_AND if it turns out
4505 // that the mask is irrelevant.
4506 assert(MI.getOpcode() == TargetOpcode::G_AND);
4507 Register Dst = MI.getOperand(0).getReg();
4508 Register AndLHS = MI.getOperand(1).getReg();
4509 Register AndRHS = MI.getOperand(2).getReg();
4510 LLT WideTy = MRI.getType(Dst);
4511
4512 // If the potential binop has more than one use, then it's possible that one
4513 // of those uses will need its full width.
4514 if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
4515 return false;
4516
4517 // Check if the LHS feeding the AND is impacted by the high bits that we're
4518 // masking out.
4519 //
4520 // e.g. for 64-bit x, y:
4521 //
4522 // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
4523 MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
4524 if (!LHSInst)
4525 return false;
4526 unsigned LHSOpc = LHSInst->getOpcode();
4527 switch (LHSOpc) {
4528 default:
4529 return false;
4530 case TargetOpcode::G_ADD:
4531 case TargetOpcode::G_SUB:
4532 case TargetOpcode::G_MUL:
4533 case TargetOpcode::G_AND:
4534 case TargetOpcode::G_OR:
4535 case TargetOpcode::G_XOR:
4536 break;
4537 }
4538
4539 // Find the mask on the RHS.
4540 auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
4541 if (!Cst)
4542 return false;
4543 auto Mask = Cst->Value;
4544 if (!Mask.isMask())
4545 return false;
4546
4547 // No point in combining if there's nothing to truncate.
4548 unsigned NarrowWidth = Mask.countr_one();
4549 if (NarrowWidth == WideTy.getSizeInBits())
4550 return false;
4551 LLT NarrowTy = LLT::scalar(NarrowWidth);
4552
4553 // Check if adding the zext + truncates could be harmful.
4554 auto &MF = *MI.getMF();
4555 const auto &TLI = getTargetLowering();
4556 LLVMContext &Ctx = MF.getFunction().getContext();
4557 auto &DL = MF.getDataLayout();
4558 if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
4559 !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))
4560 return false;
4561 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
4562 !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
4563 return false;
4564 Register BinOpLHS = LHSInst->getOperand(1).getReg();
4565 Register BinOpRHS = LHSInst->getOperand(2).getReg();
4566 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4567 auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
4568 auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
4569 auto NarrowBinOp =
4570 Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
4571 auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
4572 Observer.changingInstr(MI);
4573 MI.getOperand(1).setReg(Ext.getReg(0));
4574 Observer.changedInstr(MI);
4575 };
4576 return true;
4577}
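The narrowing is sound because, for the listed opcodes, the bits kept by the mask never depend on the masked-off high bits of the inputs. A standalone check of that property for G_ADD with a 16-bit mask (values illustrative, not LLVM API):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t X = 0x123456789ABCDEF0ull, Y = 0x0FEDCBA987654321ull;
  uint64_t Mask = 0xFFFF;                          // 000...11111111 mask
  uint64_t Wide = (X + Y) & Mask;                  // G_ADD then G_AND
  uint16_t NarrowAdd = (uint16_t)X + (uint16_t)Y;  // G_TRUNC, G_TRUNC, narrow G_ADD
  uint64_t Narrow = (uint64_t)NarrowAdd & Mask;    // G_ZEXT then G_AND
  assert(Wide == Narrow);
  return 0;
}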
4578
4579bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
4580 unsigned Opc = MI.getOpcode();
4581 assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
4582
4583 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2)))
4584 return false;
4585
4586 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4587 Observer.changingInstr(MI);
4588 unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
4589 : TargetOpcode::G_SADDO;
4590 MI.setDesc(Builder.getTII().get(NewOpc));
4591 MI.getOperand(3).setReg(MI.getOperand(2).getReg());
4592 Observer.changedInstr(MI);
4593 };
4594 return true;
4595}
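The rewrite relies on an overflow-checked multiply by 2 and an overflow-checked add of the operand to itself agreeing on both the value and the carry-out. A standalone check of the unsigned case, sketched with the GCC/Clang overflow builtins (an assumption about the host compiler; not LLVM API):

#include <cassert>

int main() {
  for (unsigned X : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu}) {
    unsigned MulRes, AddRes;
    bool MulOv = __builtin_mul_overflow(X, 2u, &MulRes);  // G_UMULO x, 2
    bool AddOv = __builtin_add_overflow(X, X, &AddRes);   // G_UADDO x, x
    assert(MulOv == AddOv && MulRes == AddRes);
  }
  return 0;
}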
4596
4597bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4598 // (G_*MULO x, 0) -> 0 + no carry out
4599 assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
4600 MI.getOpcode() == TargetOpcode::G_SMULO);
4601 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4602 return false;
4603 Register Dst = MI.getOperand(0).getReg();
4604 Register Carry = MI.getOperand(1).getReg();
4605 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) ||
4606 !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4607 return false;
4608 MatchInfo = [=](MachineIRBuilder &B) {
4609 B.buildConstant(Dst, 0);
4610 B.buildConstant(Carry, 0);
4611 };
4612 return true;
4613}
4614
4615bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4616 // (G_*ADDO x, 0) -> x + no carry out
4617 assert(MI.getOpcode() == TargetOpcode::G_UADDO ||
4618 MI.getOpcode() == TargetOpcode::G_SADDO);
4619 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4620 return false;
4621 Register Carry = MI.getOperand(1).getReg();
4622 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4623 return false;
4624 Register Dst = MI.getOperand(0).getReg();
4625 Register LHS = MI.getOperand(2).getReg();
4626 MatchInfo = [=](MachineIRBuilder &B) {
4627 B.buildCopy(Dst, LHS);
4628 B.buildConstant(Carry, 0);
4629 };
4630 return true;
4631}
4632
4633bool CombinerHelper::matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) {
4634 // (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
4635 // (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
4636 assert(MI.getOpcode() == TargetOpcode::G_UADDE ||
4637 MI.getOpcode() == TargetOpcode::G_SADDE ||
4638 MI.getOpcode() == TargetOpcode::G_USUBE ||
4639 MI.getOpcode() == TargetOpcode::G_SSUBE);
4640 if (!mi_match(MI.getOperand(4).getReg(), MRI, m_SpecificICstOrSplat(0)))
4641 return false;
4642 MatchInfo = [&](MachineIRBuilder &B) {
4643 unsigned NewOpcode;
Step 1: 'NewOpcode' declared without an initial value
4644 switch (MI.getOpcode()) {
Step 2: 'Default' branch taken. Execution continues on line 4658
4645 case TargetOpcode::G_UADDE:
4646 NewOpcode = TargetOpcode::G_UADDO;
4647 break;
4648 case TargetOpcode::G_SADDE:
4649 NewOpcode = TargetOpcode::G_SADDO;
4650 break;
4651 case TargetOpcode::G_USUBE:
4652 NewOpcode = TargetOpcode::G_USUBO;
4653 break;
4654 case TargetOpcode::G_SSUBE:
4655 NewOpcode = TargetOpcode::G_SSUBO;
4656 break;
4657 }
4658 Observer.changingInstr(MI);
4659 MI.setDesc(B.getTII().get(NewOpcode));
Step 3: 1st function call argument is an uninitialized value
4660 MI.removeOperand(4);
4661 Observer.changedInstr(MI);
4662 };
4663 return true;
4664}
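The uninitialized-value path reported above (Steps 1-3) exists only because the lambda's switch has no default case; the assert at lines 4636-4639 already restricts the opcode to the four handled values. One hedged way to make that invariant visible to the analyzer is a defaulted switch, sketched below. llvm_unreachable() is the existing helper from llvm/Support/ErrorHandling.h; this is an illustration, not a committed fix.

unsigned NewOpcode;
switch (MI.getOpcode()) {
case TargetOpcode::G_UADDE: NewOpcode = TargetOpcode::G_UADDO; break;
case TargetOpcode::G_SADDE: NewOpcode = TargetOpcode::G_SADDO; break;
case TargetOpcode::G_USUBE: NewOpcode = TargetOpcode::G_USUBO; break;
case TargetOpcode::G_SSUBE: NewOpcode = TargetOpcode::G_SSUBO; break;
default:
  llvm_unreachable("Unexpected opcode in matchAddEToAddO");
}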
4665
4666bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI,
4667 BuildFnTy &MatchInfo) {
4668 assert(MI.getOpcode() == TargetOpcode::G_SUB);
4669 Register Dst = MI.getOperand(0).getReg();
4670 // (x + y) - z -> x (if y == z)
4671 // (x + y) - z -> y (if x == z)
4672 Register X, Y, Z;
4673 if (mi_match(Dst, MRI, m_GSub(m_GAdd(m_Reg(X), m_Reg(Y)), m_Reg(Z)))) {
4674 Register ReplaceReg;
4675 int64_t CstX, CstY;
4676 if (Y == Z || (mi_match(Y, MRI, m_ICstOrSplat(CstY)) &&
4677 mi_match(Z, MRI, m_SpecificICstOrSplat(CstY))))
4678 ReplaceReg = X;
4679 else if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
4680 mi_match(Z, MRI, m_SpecificICstOrSplat(CstX))))
4681 ReplaceReg = Y;
4682 if (ReplaceReg) {
4683 MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, ReplaceReg); };
4684 return true;
4685 }
4686 }
4687
4688 // x - (y + z) -> 0 - y (if x == z)
4689 // x - (y + z) -> 0 - z (if x == y)
4690 if (mi_match(Dst, MRI, m_GSub(m_Reg(X), m_GAdd(m_Reg(Y), m_Reg(Z))))) {
4691 Register ReplaceReg;
4692 int64_t CstX;
4693 if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
4694 mi_match(Z, MRI, m_SpecificICstOrSplat(CstX))))
4695 ReplaceReg = Y;
4696 else if (X == Y || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
4697 mi_match(Y, MRI, m_SpecificICstOrSplat(CstX))))
4698 ReplaceReg = Z;
4699 if (ReplaceReg) {
4700 MatchInfo = [=](MachineIRBuilder &B) {
4701 auto Zero = B.buildConstant(MRI.getType(Dst), 0);
4702 B.buildSub(Dst, Zero, ReplaceReg);
4703 };
4704 return true;
4705 }
4706 }
4707 return false;
4708}
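Both rewrites are plain integer identities; a minimal standalone check with illustrative values:

#include <cassert>

int main() {
  int X = 7, Y = 11;
  assert((X + Y) - Y == X);      // (x + y) - z -> x      when y == z
  assert(X - (Y + X) == 0 - Y);  // x - (y + z) -> 0 - y  when x == z
  return 0;
}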
4709
4710MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
4711 assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4712 auto &UDiv = cast<GenericMachineInstr>(MI);
4713 Register Dst = UDiv.getReg(0);
4714 Register LHS = UDiv.getReg(1);
4715 Register RHS = UDiv.getReg(2);
4716 LLT Ty = MRI.getType(Dst);
4717 LLT ScalarTy = Ty.getScalarType();
4718 const unsigned EltBits = ScalarTy.getScalarSizeInBits();
4719 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4720 LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
4721 auto &MIB = Builder;
4722 MIB.setInstrAndDebugLoc(MI);
4723
4724 bool UseNPQ = false;
4725 SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
4726
4727 auto BuildUDIVPattern = [&](const Constant *C) {
4728 auto *CI = cast<ConstantInt>(C);
4729 const APInt &Divisor = CI->getValue();
4730
4731 bool SelNPQ = false;
4732 APInt Magic(Divisor.getBitWidth(), 0);
4733 unsigned PreShift = 0, PostShift = 0;
4734
4735 // Magic algorithm doesn't work for division by 1. We need to emit a select
4736 // at the end.
4737 // TODO: Use undef values for divisor of 1.
4738 if (!Divisor.isOne()) {
4739 UnsignedDivisionByConstantInfo magics =
4740 UnsignedDivisionByConstantInfo::get(Divisor);
4741
4742 Magic = std::move(magics.Magic);
4743
4744 assert(magics.PreShift < Divisor.getBitWidth() &&
4745 "We shouldn't generate an undefined shift!");
4746 assert(magics.PostShift < Divisor.getBitWidth() &&
4747 "We shouldn't generate an undefined shift!");
4748 assert((!magics.IsAdd || magics.PreShift == 0) && "Unexpected pre-shift");
4749 PreShift = magics.PreShift;
4750 PostShift = magics.PostShift;
4751 SelNPQ = magics.IsAdd;
4752 }
4753
4754 PreShifts.push_back(
4755 MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
4756 MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0));
4757 NPQFactors.push_back(
4758 MIB.buildConstant(ScalarTy,
4759 SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
4760 : APInt::getZero(EltBits))
4761 .getReg(0));
4762 PostShifts.push_back(
4763 MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
4764 UseNPQ |= SelNPQ;
4765 return true;
4766 };
4767
4768 // Collect the shifts/magic values from each element.
4769 bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern);
4770 (void)Matched;
4771 assert(Matched && "Expected unary predicate match to succeed");
4772
4773 Register PreShift, PostShift, MagicFactor, NPQFactor;
4774 auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI);
4775 if (RHSDef) {
4776 PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
4777 MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
4778 NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
4779 PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
4780 } else {
4781 assert(MRI.getType(RHS).isScalar() &&
4782 "Non-build_vector operation should have been a scalar");
4783 PreShift = PreShifts[0];
4784 MagicFactor = MagicFactors[0];
4785 PostShift = PostShifts[0];
4786 }
4787
4788 Register Q = LHS;
4789 Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);
4790
4791 // Multiply the numerator (operand 0) by the magic value.
4792 Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);
4793
4794 if (UseNPQ) {
4795 Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0);
4796
4797 // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4798 // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero.
4799 if (Ty.isVector())
4800 NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
4801 else
4802 NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);
4803
4804 Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
4805 }
4806
4807 Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
4808 auto One = MIB.buildConstant(Ty, 1);
4809 auto IsOne = MIB.buildICmp(
4810 CmpInst::Predicate::ICMP_EQ,
4811 Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
4812 return MIB.buildSelect(Ty, IsOne, LHS, Q);
4813}
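The scalar arithmetic behind this sequence is the classic divide-by-constant-via-multiply-high trick. For unsigned division by 3 on 32 bits, the well-known magic factor is 0xAAAAAAAB (ceil(2^33 / 3)) with a post-shift of 1 and no NPQ path; a standalone check of that case (values illustrative, no LLVM API):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t MagicFactor = 0xAAAAAAABu;  // ceil(2^33 / 3)
  const unsigned PostShift = 1;
  for (uint32_t X : {0u, 1u, 2u, 3u, 100u, 0x7FFFFFFFu, 0xFFFFFFFFu}) {
    uint32_t Hi = (uint32_t)(((uint64_t)X * MagicFactor) >> 32);  // G_UMULH
    uint32_t Q = Hi >> PostShift;                                 // G_LSHR
    assert(Q == X / 3);
  }
  return 0;
}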
4814
4815bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
4816 assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4817 Register Dst = MI.getOperand(0).getReg();
4818 Register RHS = MI.getOperand(2).getReg();
4819 LLT DstTy = MRI.getType(Dst);
4820 auto *RHSDef = MRI.getVRegDef(RHS);
4821 if (!isConstantOrConstantVector(*RHSDef, MRI))
4822 return false;
4823
4824 auto &MF = *MI.getMF();
4825 AttributeList Attr = MF.getFunction().getAttributes();
4826 const auto &TLI = getTargetLowering();
4827 LLVMContext &Ctx = MF.getFunction().getContext();
4828 auto &DL = MF.getDataLayout();
4829 if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
4830 return false;
4831
4832 // Don't do this for minsize because the instruction sequence is usually
4833 // larger.
4834 if (MF.getFunction().hasMinSize())
4835 return false;
4836
4837 // Don't do this if the types are not going to be legal.
4838 if (LI) {
4839 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
4840 return false;
4841 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}}))
4842 return false;
4843 if (!isLegalOrBeforeLegalizer(
4844 {TargetOpcode::G_ICMP,
4845 {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
4846 DstTy}}))
4847 return false;
4848 }
4849
4850 auto CheckEltValue = [&](const Constant *C) {
4851 if (auto *CI = dyn_cast_or_null<ConstantInt>(C))
4852 return !CI->isZero();
4853 return false;
4854 };
4855 return matchUnaryPredicate(MRI, RHS, CheckEltValue);
4856}
4857
4858void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
4859 auto *NewMI = buildUDivUsingMul(MI);
4860 replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
4861}
4862
4863bool CombinerHelper::matchSDivByConst(MachineInstr &MI) {
4864 assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
4865 Register Dst = MI.getOperand(0).getReg();
4866 Register RHS = MI.getOperand(2).getReg();
4867 LLT DstTy = MRI.getType(Dst);
4868
4869 auto &MF = *MI.getMF();
4870 AttributeList Attr = MF.getFunction().getAttributes();
4871 const auto &TLI = getTargetLowering();
4872 LLVMContext &Ctx = MF.getFunction().getContext();
4873 auto &DL = MF.getDataLayout();
4874 if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
4875 return false;
4876
4877 // Don't do this for minsize because the instruction sequence is usually
4878 // larger.
4879 if (MF.getFunction().hasMinSize())
4880 return false;
4881
4882 // If the sdiv has an 'exact' flag we can use a simpler lowering.
4883 if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
4884 return matchUnaryPredicate(
4885 MRI, RHS, [](const Constant *C) { return C && !C->isZeroValue(); });
4886 }
4887
4888 // Don't support the general case for now.
4889 return false;
4890}
4891
4892void CombinerHelper::applySDivByConst(MachineInstr &MI) {
4893 auto *NewMI = buildSDivUsingMul(MI);
4894 replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
4895}
4896
4897MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
4898 assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
4899 auto &SDiv = cast<GenericMachineInstr>(MI);
4900 Register Dst = SDiv.getReg(0);
4901 Register LHS = SDiv.getReg(1);
4902 Register RHS = SDiv.getReg(2);
4903 LLT Ty = MRI.getType(Dst);
4904 LLT ScalarTy = Ty.getScalarType();
4905 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4906 LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
4907 auto &MIB = Builder;
4908 MIB.setInstrAndDebugLoc(MI);
4909
4910 bool UseSRA = false;
4911 SmallVector<Register, 16> Shifts, Factors;
4912
4913 auto *RHSDef = cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI));
4914 bool IsSplat = getIConstantSplatVal(*RHSDef, MRI).has_value();
4915
4916 auto BuildSDIVPattern = [&](const Constant *C) {
4917 // Don't recompute inverses for each splat element.
4918 if (IsSplat && !Factors.empty()) {
4919 Shifts.push_back(Shifts[0]);
4920 Factors.push_back(Factors[0]);
4921 return true;
4922 }
4923
4924 auto *CI = cast<ConstantInt>(C);
4925 APInt Divisor = CI->getValue();
4926 unsigned Shift = Divisor.countr_zero();
4927 if (Shift) {
4928 Divisor.ashrInPlace(Shift);
4929 UseSRA = true;
4930 }
4931
4932 // Calculate the multiplicative inverse modulo BW.
4933 // 2^W requires W + 1 bits, so we have to extend and then truncate.
4934 unsigned W = Divisor.getBitWidth();
4935 APInt Factor = Divisor.zext(W + 1)
4936 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
4937 .trunc(W);
4938 Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
4939 Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
4940 return true;
4941 };
4942
4943 // Collect all magic values from the build vector.
4944 bool Matched = matchUnaryPredicate(MRI, RHS, BuildSDIVPattern);
4945 (void)Matched;
4946 assert(Matched && "Expected unary predicate match to succeed");
4947
4948 Register Shift, Factor;
4949 if (Ty.isVector()) {
4950 Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
4951 Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
4952 } else {
4953 Shift = Shifts[0];
4954 Factor = Factors[0];
4955 }
4956
4957 Register Res = LHS;
4958
4959 if (UseSRA)
4960 Res = MIB.buildAShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0);
4961
4962 return MIB.buildMul(Ty, Res, Factor);
4963}
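For the exact-division case matched by matchSDivByConst above, the lowering is an exact arithmetic shift by the number of trailing zero bits of the divisor followed by a multiply with the odd part's multiplicative inverse modulo 2^W. A standalone check for an exact signed divide by 6 in 32 bits, where 0xAAAAAAAB is the inverse of 3 modulo 2^32 (values illustrative; assumes arithmetic right shift for negative ints, which all mainstream compilers provide and C++20 guarantees):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Inv3 = 0xAAAAAAABu;  // 3 * 0xAAAAAAAB == 1 (mod 2^32)
  for (int32_t X : {-600000, -6, 0, 6, 42, 599994}) {  // all multiples of 6
    uint32_t Shifted = (uint32_t)(X >> 1);  // exact G_ASHR by countr_zero(6) == 1
    uint32_t Res = Shifted * Inv3;          // G_MUL by the inverse of the odd part (3)
    assert(Res == (uint32_t)(X / 6));
  }
  return 0;
}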
4964
4965bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
4966 assert(MI.getOpcode() == TargetOpcode::G_UMULH);
4967 Register RHS = MI.getOperand(2).getReg();
4968 Register Dst = MI.getOperand(0).getReg();
4969 LLT Ty = MRI.getType(Dst);
4970 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4971 auto MatchPow2ExceptOne = [&](const Constant *C) {
4972 if (auto *CI = dyn_cast<ConstantInt>(C))
4973 return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
4974 return false;
4975 };
4976 if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false))
4977 return false;
4978 return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}});
4979}
4980
4981void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
4982 Register LHS = MI.getOperand(1).getReg();
4983 Register RHS = MI.getOperand(2).getReg();
4984 Register Dst = MI.getOperand(0).getReg();
4985 LLT Ty = MRI.getType(Dst);
4986 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4987 unsigned NumEltBits = Ty.getScalarSizeInBits();
4988
4989 Builder.setInstrAndDebugLoc(MI);
4990 auto LogBase2 = buildLogBase2(RHS, Builder);
4991 auto ShiftAmt =
4992 Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
4993 auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
4994 Builder.buildLShr(Dst, LHS, Trunc);
4995 MI.eraseFromParent();
4996}
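The rewrite is the identity umulh(x, 2^k) == x >> (BitWidth - k). A quick standalone check for k == 3 on 32 bits (values illustrative):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
    uint32_t High = (uint32_t)(((uint64_t)X * 8u) >> 32);  // G_UMULH x, 8
    assert(High == X >> (32 - 3));                         // G_LSHR x, 29
  }
  return 0;
}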
4997
4998bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
4999 BuildFnTy &MatchInfo) {
5000 unsigned Opc = MI.getOpcode();
5001 assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
5002 Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5003 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);
5004
5005 Register Dst = MI.getOperand(0).getReg();
5006 Register X = MI.getOperand(1).getReg();
5007 Register Y = MI.getOperand(2).getReg();
5008 LLT Type = MRI.getType(Dst);
5009
5010 // fold (fadd x, fneg(y)) -> (fsub x, y)
5011 // fold (fadd fneg(y), x) -> (fsub x, y)
5012 // G_FADD is commutative so both cases are checked by m_GFAdd
5013 if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
5014 isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
5015 Opc = TargetOpcode::G_FSUB;
5016 }
5017 /// fold (fsub x, fneg(y)) -> (fadd x, y)
5018 else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
5019 isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
5020 Opc = TargetOpcode::G_FADD;
5021 }
5022 // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
5023 // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
5024 // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
5025 // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
5026 else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5027 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
5028 mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
5029 mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
5030 // no opcode change
5031 } else
5032 return false;
5033
5034 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5035 Observer.changingInstr(MI);
5036 MI.setDesc(B.getTII().get(Opc));
5037 MI.getOperand(1).setReg(X);
5038 MI.getOperand(2).setReg(Y);
5039 Observer.changedInstr(MI);
5040 };
5041 return true;
5042}
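These folds are exact in IEEE arithmetic: negation only flips the sign bit, so x + (-y) rounds identically to x - y, and negating both factors of a product or quotient cancels. A minimal standalone check with finite illustrative values (default rounding mode assumed):

#include <cassert>

int main() {
  double X = 1.5, Y = -2.25e10;
  assert(X + (-Y) == X - Y);     // (fadd x, fneg(y))        -> (fsub x, y)
  assert(X - (-Y) == X + Y);     // (fsub x, fneg(y))        -> (fadd x, y)
  assert((-X) * (-Y) == X * Y);  // (fmul fneg(x), fneg(y))  -> (fmul x, y)
  assert((-X) / (-Y) == X / Y);  // (fdiv fneg(x), fneg(y))  -> (fdiv x, y)
  return 0;
}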
5043
5044bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
5045 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5046
5047 Register LHS = MI.getOperand(1).getReg();
5048 MatchInfo = MI.getOperand(2).getReg();
5049 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
5050
5051 const auto LHSCst = Ty.isVector()
5052 ? getFConstantSplat(LHS, MRI, /* allowUndef */ true)
5053 : getFConstantVRegValWithLookThrough(LHS, MRI);
5054 if (!LHSCst)
5055 return false;
5056
5057 // -0.0 is always allowed
5058 if (LHSCst->Value.isNegZero())
5059 return true;
5060
5061 // +0.0 is only allowed if nsz is set.
5062 if (LHSCst->Value.isPosZero())
5063 return MI.getFlag(MachineInstr::FmNsz);
5064
5065 return false;
5066}
5067
5068void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
5069 Builder.setInstrAndDebugLoc(MI);
5070 Register Dst = MI.getOperand(0).getReg();
5071 Builder.buildFNeg(
5072 Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
5073 eraseInst(MI);
5074}
5075
5076/// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
5077/// due to global flags or MachineInstr flags.
5078static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
5079 if (MI.getOpcode() != TargetOpcode::G_FMUL)
5080 return false;
5081 return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract);
5082}
5083
5084static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
5085 const MachineRegisterInfo &MRI) {
5086 return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()),
5087 MRI.use_instr_nodbg_end()) >
5088 std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()),
5089 MRI.use_instr_nodbg_end());
5090}
5091
5092bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
5093 bool &AllowFusionGlobally,
5094 bool &HasFMAD, bool &Aggressive,
5095 bool CanReassociate) {
5096
5097 auto *MF = MI.getMF();
5098 const auto &TLI = *MF->getSubtarget().getTargetLowering();
5099 const TargetOptions &Options = MF->getTarget().Options;
5100 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5101
5102 if (CanReassociate &&
5103 !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc)))
5104 return false;
5105
5106 // Floating-point multiply-add with intermediate rounding.
5107 HasFMAD = (!isPreLegalize() && TLI.isFMADLegal(MI, DstType));
5108 // Floating-point multiply-add without intermediate rounding.
5109 bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
5110 isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
5111 // No valid opcode, do not combine.
5112 if (!HasFMAD && !HasFMA)
5113 return false;
5114
5115 AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
5116 Options.UnsafeFPMath || HasFMAD;
5117 // If the addition is not contractable, do not combine.
5118 if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
5119 return false;
5120
5121 Aggressive = TLI.enableAggressiveFMAFusion(DstType);
5122 return true;
5123}
5124
5125bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
5126 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5127 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5128
5129 bool AllowFusionGlobally, HasFMAD, Aggressive;
5130 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5131 return false;
5132
5133 Register Op1 = MI.getOperand(1).getReg();
5134 Register Op2 = MI.getOperand(2).getReg();
5135 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5136 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5137 unsigned PreferredFusedOpcode =
5138 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5139
5140 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5141 // prefer to fold the multiply with fewer uses.
5142 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5143 isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5144 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5145 std::swap(LHS, RHS);
5146 }
5147
5148 // fold (fadd (fmul x, y), z) -> (fma x, y, z)
5149 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5150 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
5151 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5152 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5153 {LHS.MI->getOperand(1).getReg(),
5154 LHS.MI->getOperand(2).getReg(), RHS.Reg});
5155 };
5156 return true;
5157 }
5158
5159 // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
5160 if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5161 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
5162 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5163 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5164 {RHS.MI->getOperand(1).getReg(),
5165 RHS.MI->getOperand(2).getReg(), LHS.Reg});
5166 };
5167 return true;
5168 }
5169
5170 return false;
5171}
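The fused form differs from the unfused one only by skipping the intermediate rounding of the multiply, which is why the contract/fast-math checks above gate the combine. With exactly representable values the two agree, as a small std::fma check shows (illustrative only):

#include <cassert>
#include <cmath>

int main() {
  double X = 2.0, Y = 3.0, Z = 4.0;
  assert(std::fma(X, Y, Z) == X * Y + Z);  // exact here; in general the fused
                                           // result may differ by one rounding
  return 0;
}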
5172
5173bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
5174 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5175 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5176
5177 bool AllowFusionGlobally, HasFMAD, Aggressive;
5178 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5179 return false;
5180
5181 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5182 Register Op1 = MI.getOperand(1).getReg();
5183 Register Op2 = MI.getOperand(2).getReg();
5184 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5185 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5186 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5187
5188 unsigned PreferredFusedOpcode =
5189 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5190
5191 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5192 // prefer to fold the multiply with fewer uses.
5193 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5194 isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5195 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5196 std::swap(LHS, RHS);
5197 }
5198
5199 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
5200 MachineInstr *FpExtSrc;
5201 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5202 isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5203 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5204 MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5205 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5206 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5207 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5208 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5209 {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
5210 };
5211 return true;
5212 }
5213
5214 // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
5215 // Note: Commutes FADD operands.
5216 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5217 isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5218 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5219 MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5220 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5221 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5222 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5223 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5224 {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
5225 };
5226 return true;
5227 }
5228
5229 return false;
5230}
5231
5232bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
5233 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5234 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5235
5236 bool AllowFusionGlobally, HasFMAD, Aggressive;
5237 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
5238 return false;
5239
5240 Register Op1 = MI.getOperand(1).getReg();
5241 Register Op2 = MI.getOperand(2).getReg();
5242 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5243 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5244 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5245
5246 unsigned PreferredFusedOpcode =
5247 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5248
5249 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5250 // prefer to fold the multiply with fewer uses.
5251 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5252 isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5253 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5254 std::swap(LHS, RHS);
5255 }
5256
5257 MachineInstr *FMA = nullptr;
5258 Register Z;
5259 // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
5260 if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
5261 (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
5262 TargetOpcode::G_FMUL) &&
5263 MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
5264 MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
5265 FMA = LHS.MI;
5266 Z = RHS.Reg;
5267 }
5268 // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
5269 else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
5270 (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
5271 TargetOpcode::G_FMUL) &&
5272 MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
5273 MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
5274 Z = LHS.Reg;
5275 FMA = RHS.MI;
5276 }
5277
5278 if (FMA) {
5279 MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg());
5280 Register X = FMA->getOperand(1).getReg();
5281 Register Y = FMA->getOperand(2).getReg();
5282 Register U = FMulMI->getOperand(1).getReg();
5283 Register V = FMulMI->getOperand(2).getReg();
5284
5285 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5286 Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
5287 B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
5288 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5289 {X, Y, InnerFMA});
5290 };
5291 return true;
5292 }
5293
5294 return false;
5295}
5296
5297bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
5298 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5299 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5300
5301 bool AllowFusionGlobally, HasFMAD, Aggressive;
5302 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5303 return false;
5304
5305 if (!Aggressive)
5306 return false;
5307
5308 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5309 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5310 Register Op1 = MI.getOperand(1).getReg();
5311 Register Op2 = MI.getOperand(2).getReg();
5312 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5313 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5314
5315 unsigned PreferredFusedOpcode =
5316 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5317
5318 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5319 // prefer to fold the multiply with fewer uses.
5320 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5321 isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5322 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5323 std::swap(LHS, RHS);
5324 }
5325
5326 // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
5327 auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
5328 Register Y, MachineIRBuilder &B) {
5329 Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
5330 Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
5331 Register InnerFMA =
5332 B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
5333 .getReg(0);
5334 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5335 {X, Y, InnerFMA});
5336 };
5337
5338 MachineInstr *FMulMI, *FMAMI;
5339 // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
5340 // -> (fma x, y, (fma (fpext u), (fpext v), z))
5341 if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
5342 mi_match(LHS.MI->getOperand(3).getReg(), MRI,
5343 m_GFPExt(m_MInstr(FMulMI))) &&
5344 isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5345 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5346 MRI.getType(FMulMI->getOperand(0).getReg()))) {
5347 MatchInfo = [=](MachineIRBuilder &B) {
5348 buildMatchInfo(FMulMI->getOperand(1).getReg(),
5349 FMulMI->getOperand(2).getReg(), RHS.Reg,
5350 LHS.MI->getOperand(1).getReg(),
5351 LHS.MI->getOperand(2).getReg(), B);
5352 };
5353 return true;
5354 }
5355
5356 // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
5357 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
5358 // FIXME: This turns two single-precision and one double-precision
5359 // operation into two double-precision operations, which might not be
5360 // interesting for all targets, especially GPUs.
5361 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
5362 FMAMI->getOpcode() == PreferredFusedOpcode) {
5363 MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
5364 if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5365 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5366 MRI.getType(FMAMI->getOperand(0).getReg()))) {
5367 MatchInfo = [=](MachineIRBuilder &B) {
5368 Register X = FMAMI->getOperand(1).getReg();
5369 Register Y = FMAMI->getOperand(2).getReg();
5370 X = B.buildFPExt(DstType, X).getReg(0);
5371 Y = B.buildFPExt(DstType, Y).getReg(0);
5372 buildMatchInfo(FMulMI->getOperand(1).getReg(),
5373 FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
5374 };
5375
5376 return true;
5377 }
5378 }
5379
5380 // fold (fadd z, (fma x, y, (fpext (fmul u, v)))
5381 // -> (fma x, y, (fma (fpext u), (fpext v), z))
5382 if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
5383 mi_match(RHS.MI->getOperand(3).getReg(), MRI,
5384 m_GFPExt(m_MInstr(FMulMI))) &&
5385 isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5386 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5387 MRI.getType(FMulMI->getOperand(0).getReg()))) {
5388 MatchInfo = [=](MachineIRBuilder &B) {
5389 buildMatchInfo(FMulMI->getOperand(1).getReg(),
5390 FMulMI->getOperand(2).getReg(), LHS.Reg,
5391 RHS.MI->getOperand(1).getReg(),
5392 RHS.MI->getOperand(2).getReg(), B);
5393 };
5394 return true;
5395 }
5396
5397 // fold (fadd z, (fpext (fma x, y, (fmul u, v)))
5398 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
5399 // FIXME: This turns two single-precision and one double-precision
5400 // operation into two double-precision operations, which might not be
5401 // interesting for all targets, especially GPUs.
5402 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
5403 FMAMI->getOpcode() == PreferredFusedOpcode) {
5404 MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
5405 if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5406 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5407 MRI.getType(FMAMI->getOperand(0).getReg()))) {
5408 MatchInfo = [=](MachineIRBuilder &B) {
5409 Register X = FMAMI->getOperand(1).getReg();
5410 Register Y = FMAMI->getOperand(2).getReg();
5411 X = B.buildFPExt(DstType, X).getReg(0);
5412 Y = B.buildFPExt(DstType, Y).getReg(0);
5413 buildMatchInfo(FMulMI->getOperand(1).getReg(),
5414 FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
5415 };
5416 return true;
5417 }
5418 }
5419
5420 return false;
5421}
5422
5423bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
5424 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5425 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5426
5427 bool AllowFusionGlobally, HasFMAD, Aggressive;
5428 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5429 return false;
5430
5431 Register Op1 = MI.getOperand(1).getReg();
5432 Register Op2 = MI.getOperand(2).getReg();
5433 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5434 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5435 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5436
5437 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5438 // prefer to fold the multiply with fewer uses.
5439 int FirstMulHasFewerUses = true;
5440 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5441 isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5442 hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5443 FirstMulHasFewerUses = false;
5444
5445 unsigned PreferredFusedOpcode =
5446 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5447
5448 // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
5449 if (FirstMulHasFewerUses &&
5450 (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5451 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
5452 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5453 Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
5454 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5455 {LHS.MI->getOperand(1).getReg(),
5456 LHS.MI->getOperand(2).getReg(), NegZ});
5457 };
5458 return true;
5459 }
5460 // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
5461 else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5462 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
5463 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5464 Register NegY =
5465 B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
5466 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5467 {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
5468 };
5469 return true;
5470 }
5471
5472 return false;
5473}
5474
5475bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
5476 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5477 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5478
5479 bool AllowFusionGlobally, HasFMAD, Aggressive;
5480 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5481 return false;
5482
5483 Register LHSReg = MI.getOperand(1).getReg();
5484 Register RHSReg = MI.getOperand(2).getReg();
5485 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5486
5487 unsigned PreferredFusedOpcode =
5488 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5489
5490 MachineInstr *FMulMI;
5491 // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
5492 if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
5493 (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
5494 MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
5495 isContractableFMul(*FMulMI, AllowFusionGlobally)) {
5496 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5497 Register NegX =
5498 B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
5499 Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
5500 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5501 {NegX, FMulMI->getOperand(2).getReg(), NegZ});
5502 };
5503 return true;
5504 }
5505
5506 // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
5507 if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
5508 (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
5509 MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
5510 isContractableFMul(*FMulMI, AllowFusionGlobally)) {
5511 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5512 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5513 {FMulMI->getOperand(1).getReg(),
5514 FMulMI->getOperand(2).getReg(), LHSReg});
5515 };
5516 return true;
5517 }
5518
5519 return false;
5520}
5521
5522bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
5523 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5524 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5525
5526 bool AllowFusionGlobally, HasFMAD, Aggressive;
5527 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5528 return false;
5529
5530 Register LHSReg = MI.getOperand(1).getReg();
5531 Register RHSReg = MI.getOperand(2).getReg();
5532 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5533
5534 unsigned PreferredFusedOpcode =
5535 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5536
5537 MachineInstr *FMulMI;
5538 // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
5539 if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
5540 isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5541 (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) {
5542 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5543 Register FpExtX =
5544 B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
5545 Register FpExtY =
5546 B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
5547 Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
5548 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5549 {FpExtX, FpExtY, NegZ});
5550 };
5551 return true;
5552 }
5553
5554 // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)
5555 if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
5556 isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5557 (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) {
5558 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5559 Register FpExtY =
5560 B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
5561 Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0);
5562 Register FpExtZ =
5563 B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
5564 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5565 {NegY, FpExtZ, LHSReg});
5566 };
5567 return true;
5568 }
5569
5570 return false;
5571}
5572
5573bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
5574 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5575 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5576
5577 bool AllowFusionGlobally, HasFMAD, Aggressive;
5578 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5579 return false;
5580
5581 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5582 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5583 Register LHSReg = MI.getOperand(1).getReg();
5584 Register RHSReg = MI.getOperand(2).getReg();
5585
5586 unsigned PreferredFusedOpcode =
5587 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5588
5589 auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
5590 MachineIRBuilder &B) {
5591 Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
5592 Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
5593 B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
5594 };
5595
5596 MachineInstr *FMulMI;
5597 // fold (fsub (fpext (fneg (fmul x, y))), z) ->
5598 // (fneg (fma (fpext x), (fpext y), z))
5599 // fold (fsub (fneg (fpext (fmul x, y))), z) ->
5600 // (fneg (fma (fpext x), (fpext y), z))
5601 if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
5602 mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
5603 isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5604 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
5605 MRI.getType(FMulMI->getOperand(0).getReg()))) {
5606 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5607 Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
5608 buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(),
5609 FMulMI->getOperand(2).getReg(), RHSReg, B);
5610 B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
5611 };
5612 return true;
5613 }
5614
5615 // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
5616 // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
5617 if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
5618 mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
5619 isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5620 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
5621 MRI.getType(FMulMI->getOperand(0).getReg()))) {
5622 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5623 buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(),
5624 FMulMI->getOperand(2).getReg(), LHSReg, B);
5625 };
5626 return true;
5627 }
5628
5629 return false;
5630}
5631
5632bool CombinerHelper::matchSelectToLogical(MachineInstr &MI,
5633 BuildFnTy &MatchInfo) {
5634 GSelect &Sel = cast<GSelect>(MI);
5635 Register DstReg = Sel.getReg(0);
5636 Register Cond = Sel.getCondReg();
5637 Register TrueReg = Sel.getTrueReg();
5638 Register FalseReg = Sel.getFalseReg();
5639
5640 auto *TrueDef = getDefIgnoringCopies(TrueReg, MRI);
5641 auto *FalseDef = getDefIgnoringCopies(FalseReg, MRI);
5642
5643 const LLT CondTy = MRI.getType(Cond);
5644 const LLT OpTy = MRI.getType(TrueReg);
5645 if (CondTy != OpTy || OpTy.getScalarSizeInBits() != 1)
5646 return false;
5647
5648 // We have a boolean select.
5649
5650 // select Cond, Cond, F --> or Cond, F
5651 // select Cond, 1, F --> or Cond, F
5652 auto MaybeCstTrue = isConstantOrConstantSplatVector(*TrueDef, MRI);
5653 if (Cond == TrueReg || (MaybeCstTrue && MaybeCstTrue->isOne())) {
5654 MatchInfo = [=](MachineIRBuilder &MIB) {
5655 MIB.buildOr(DstReg, Cond, FalseReg);
5656 };
5657 return true;
5658 }
5659
5660 // select Cond, T, Cond --> and Cond, T
5661 // select Cond, T, 0 --> and Cond, T
5662 auto MaybeCstFalse = isConstantOrConstantSplatVector(*FalseDef, MRI);
5663 if (Cond == FalseReg || (MaybeCstFalse && MaybeCstFalse->isZero())) {
5664 MatchInfo = [=](MachineIRBuilder &MIB) {
5665 MIB.buildAnd(DstReg, Cond, TrueReg);
5666 };
5667 return true;
5668 }
5669
5670 // select Cond, T, 1 --> or (not Cond), T
5671 if (MaybeCstFalse && MaybeCstFalse->isOne()) {
5672 MatchInfo = [=](MachineIRBuilder &MIB) {
5673 MIB.buildOr(DstReg, MIB.buildNot(OpTy, Cond), TrueReg);
5674 };
5675 return true;
5676 }
5677
5678 // select Cond, 0, F --> and (not Cond), F
5679 if (MaybeCstTrue && MaybeCstTrue->isZero()) {
5680 MatchInfo = [=](MachineIRBuilder &MIB) {
5681 MIB.buildAnd(DstReg, MIB.buildNot(OpTy, Cond), FalseReg);
5682 };
5683 return true;
5684 }
5685 return false;
5686}
5687
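Because the select above operates on 1-bit values, each rewrite is a plain Boolean identity. The following self-contained truth-table check (illustrative only, not part of CombinerHelper.cpp, using ints 0/1 for the i1 values) exercises all four rewrites:

// Standalone truth-table check of the i1 select identities.
#include <cassert>

int main() {
  for (int c = 0; c <= 1; ++c)
    for (int t = 0; t <= 1; ++t)
      for (int f = 0; f <= 1; ++f) {
        auto sel = [c](int tv, int fv) { return c ? tv : fv; };
        assert(sel(1, f) == (c | f));       // select Cond, 1, F --> or Cond, F
        assert(sel(t, 0) == (c & t));       // select Cond, T, 0 --> and Cond, T
        assert(sel(t, 1) == ((c ^ 1) | t)); // select Cond, T, 1 --> or (not Cond), T
        assert(sel(0, f) == ((c ^ 1) & f)); // select Cond, 0, F --> and (not Cond), F
      }
  return 0;
}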
5688bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
5689 unsigned &IdxToPropagate) {
5690 bool PropagateNaN;
5691 switch (MI.getOpcode()) {
5692 default:
5693 return false;
5694 case TargetOpcode::G_FMINNUM:
5695 case TargetOpcode::G_FMAXNUM:
5696 PropagateNaN = false;
5697 break;
5698 case TargetOpcode::G_FMINIMUM:
5699 case TargetOpcode::G_FMAXIMUM:
5700 PropagateNaN = true;
5701 break;
5702 }
5703
5704 auto MatchNaN = [&](unsigned Idx) {
5705 Register MaybeNaNReg = MI.getOperand(Idx).getReg();
5706 const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI);
5707 if (!MaybeCst || !MaybeCst->getValueAPF().isNaN())
5708 return false;
5709 IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
5710 return true;
5711 };
5712
5713 return MatchNaN(1) || MatchNaN(2);
5714}
5715
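The IdxToPropagate choice hinges on the two NaN semantics: the *NUM opcodes drop a NaN operand, while the *IMUM opcodes propagate it. A short standalone illustration (not part of CombinerHelper.cpp) using std::fmin, which follows the minNum, G_FMINNUM-like rule; the propagating case is only described in comments, since the C++ standard library here has no fminimum counterpart:

// Standalone illustration of the two NaN behaviours.
#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  double x = 3.0;
  // G_FMINNUM/G_FMAXNUM-like behaviour: the NaN operand is ignored, so the
  // combine forwards the index of the *other* operand.
  std::printf("fmin(NaN, 3.0) = %g\n", std::fmin(nan, x)); // prints 3
  // G_FMINIMUM/G_FMAXIMUM-like behaviour propagates the NaN, so there the
  // combine forwards the NaN operand's index instead.
  std::printf("NaN stays NaN: %d\n", std::isnan(nan) ? 1 : 0);
  return 0;
}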
5716bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
5717 assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
5718 Register LHS = MI.getOperand(1).getReg();
5719 Register RHS = MI.getOperand(2).getReg();
5720
5721 // Helper lambda to check for opportunities for
5722 // A + (B - A) -> B
5723 // (B - A) + A -> B
5724 auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) {
5725 Register Reg;
5726 return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) &&
5727 Reg == MaybeSameReg;
5728 };
5729 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
5730}
5731
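The identity behind this match, checked standalone (not part of CombinerHelper.cpp) with arbitrary values and wrapping unsigned arithmetic:

// Standalone check of the add/sub cancellation identity.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0xDEADBEEFu, B = 7u;
  assert(static_cast<uint32_t>(A + (B - A)) == B); // A + (B - A) -> B
  assert(static_cast<uint32_t>((B - A) + A) == B); // (B - A) + A -> B
  return 0;
}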
5732bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
5733 Register &MatchInfo) {
5734 // This combine folds the following patterns:
5735 //
5736 // G_BUILD_VECTOR_TRUNC (G_BITCAST(x), G_LSHR(G_BITCAST(x), k))
5737 // G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), G_TRUNC(G_LSHR(G_BITCAST(x), k)))
5738 // into
5739 // x
5740 // if
5741 // k == sizeof(VecEltTy)/2
5742 // type(x) == type(dst)
5743 //
5744 // G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), undef)
5745 // into
5746 // x
5747 // if
5748 // type(x) == type(dst)
5749
5750 LLT DstVecTy = MRI.getType(MI.getOperand(0).getReg());
5751 LLT DstEltTy = DstVecTy.getElementType();
5752
5753 Register Lo, Hi;
5754
5755 if (mi_match(
5756 MI, MRI,
5757 m_GBuildVector(m_GTrunc(m_GBitcast(m_Reg(Lo))), m_GImplicitDef()))) {
5758 MatchInfo = Lo;
5759 return MRI.getType(MatchInfo) == DstVecTy;
5760 }
5761
5762 std::optional<ValueAndVReg> ShiftAmount;
5763 const auto LoPattern = m_GBitcast(m_Reg(Lo));
5764 const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount));
5765 if (mi_match(
5766 MI, MRI,
5767 m_any_of(m_GBuildVectorTrunc(LoPattern, HiPattern),
5768 m_GBuildVector(m_GTrunc(LoPattern), m_GTrunc(HiPattern))))) {
5769 if (Lo == Hi && ShiftAmount->Value == DstEltTy.getSizeInBits()) {
5770 MatchInfo = Lo;
5771 return MRI.getType(MatchInfo) == DstVecTy;
5772 }
5773 }
5774
5775 return false;
5776}
5777
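A scalar model of the first pattern (not part of CombinerHelper.cpp), assuming the usual layout in which vector element 0 occupies the low bits of the bitcast and using an arbitrary 64-bit constant: splitting x into a truncated low half and a truncated shifted high half and then recombining them reproduces x, so the build_vector can be replaced by x.

// Standalone scalar model of the <2 x s32> rebuild pattern.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t x = 0x0123456789ABCDEFull;
  uint32_t Lo = static_cast<uint32_t>(x);       // G_TRUNC (G_BITCAST x)
  uint32_t Hi = static_cast<uint32_t>(x >> 32); // G_TRUNC (G_LSHR (G_BITCAST x), 32)
  // G_BUILD_VECTOR Lo, Hi, viewed again as one 64-bit value:
  uint64_t Rebuilt = (static_cast<uint64_t>(Hi) << 32) | Lo;
  assert(Rebuilt == x); // so the whole pattern can be replaced by x
  return 0;
}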
5778bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI,
5779 Register &MatchInfo) {
5780 // Replace (G_TRUNC (G_BITCAST (G_BUILD_VECTOR x, y)) with just x
5781 // if type(x) == type(G_TRUNC)
5782 if (!mi_match(MI.getOperand(1).getReg(), MRI,
5783 m_GBitcast(m_GBuildVector(m_Reg(MatchInfo), m_Reg()))))
5784 return false;
5785
5786 return MRI.getType(MatchInfo) == MRI.getType(MI.getOperand(0).getReg());
5787}
5788
5789bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
5790 Register &MatchInfo) {
5791 // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
5792 // y if K == size of vector element type
5793 std::optional<ValueAndVReg> ShiftAmt;
5794 if (!mi_match(MI.getOperand(1).getReg(), MRI,
5795 m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))),
5796 m_GCst(ShiftAmt))))
5797 return false;
5798
5799 LLT MatchTy = MRI.getType(MatchInfo);
5800 return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() &&
5801 MatchTy == MRI.getType(MI.getOperand(0).getReg());
5802}
5803
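The same scalar model covers both trunc folds above (again a standalone sketch, assuming element 0 of the build_vector lands in the low bits of the bitcast, with arbitrary sample values):

// Standalone scalar model of the trunc(bitcast(build_vector)) folds.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xAAAA5555u, y = 0x12345678u;
  // G_BITCAST (G_BUILD_VECTOR x, y) viewed as one 64-bit scalar:
  uint64_t Vec = (static_cast<uint64_t>(y) << 32) | x;
  assert(static_cast<uint32_t>(Vec) == x);       // G_TRUNC (...)              --> x
  assert(static_cast<uint32_t>(Vec >> 32) == y); // G_TRUNC (G_LSHR (...), 32) --> y
  return 0;
}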
5804unsigned CombinerHelper::getFPMinMaxOpcForSelect(
5805 CmpInst::Predicate Pred, LLT DstTy,
5806 SelectPatternNaNBehaviour VsNaNRetVal) const {
5807 assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE &&
5808        "Expected a NaN behaviour?");
5809 // Choose an opcode based on legality, or on the behaviour when one of the
5810 // LHS/RHS may be NaN.
5811 switch (Pred) {
5812 default:
5813 return 0;
5814 case CmpInst::FCMP_UGT:
5815 case CmpInst::FCMP_UGE:
5816 case CmpInst::FCMP_OGT:
5817 case CmpInst::FCMP_OGE:
5818 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
5819 return TargetOpcode::G_FMAXNUM;
5820 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
5821 return TargetOpcode::G_FMAXIMUM;
5822 if (isLegal({TargetOpcode::G_FMAXNUM, {DstTy}}))
5823 return TargetOpcode::G_FMAXNUM;
5824 if (isLegal({TargetOpcode::G_FMAXIMUM, {DstTy}}))
5825 return TargetOpcode::G_FMAXIMUM;
5826 return 0;
5827 case CmpInst::FCMP_ULT:
5828 case CmpInst::FCMP_ULE:
5829 case CmpInst::FCMP_OLT:
5830 case CmpInst::FCMP_OLE:
5831 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
5832 return TargetOpcode::G_FMINNUM;
5833 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
5834 return TargetOpcode::G_FMINIMUM;
5835 if (isLegal({TargetOpcode::G_FMINNUM, {DstTy}}))
5836 return TargetOpcode::G_FMINNUM;
5837 if (!isLegal({TargetOpcode::G_FMINIMUM, {DstTy}}))
5838 return 0;
5839 return TargetOpcode::G_FMINIMUM;
5840 }
5841}
5842
5843CombinerHelper::SelectPatternNaNBehaviour
5844CombinerHelper::computeRetValAgainstNaN(Register LHS, Register RHS,
5845 bool IsOrderedComparison) const {
5846 bool LHSSafe = isKnownNeverNaN(LHS, MRI);
5847 bool RHSSafe = isKnownNeverNaN(RHS, MRI);
5848 // Completely unsafe.
5849 if (!LHSSafe && !RHSSafe)
5850 return SelectPatternNaNBehaviour::NOT_APPLICABLE;
5851 if (LHSSafe && RHSSafe)
5852 return SelectPatternNaNBehaviour::RETURNS_ANY;
5853 // An ordered comparison will return false when given a NaN, so it
5854 // returns the RHS.
5855 if (IsOrderedComparison)
5856 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN
5857 : SelectPatternNaNBehaviour::RETURNS_OTHER;
5858 // An unordered comparison will return true when given a NaN, so it
5859 // returns the LHS.
5860 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER
5861 : SelectPatternNaNBehaviour::RETURNS_NAN;
5862}
5863
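The ordered/unordered split matters because an ordered fcmp is false when an operand is NaN, so the select falls through to its RHS, while an unordered fcmp is true and keeps the LHS. A small standalone check (not part of CombinerHelper.cpp), with FCMP_ULT modelled as the negation of an ordered >=:

// Standalone check of ordered vs. unordered behaviour on NaN.
#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  double y = 1.0;
  // Ordered (FCMP_OLT-like): false on NaN, so the select yields the RHS.
  assert(((nan < y) ? nan : y) == y);
  // Unordered (FCMP_ULT-like, modelled as !(LHS >= RHS)): true on NaN,
  // so the select keeps the LHS.
  assert(std::isnan(!(nan >= y) ? nan : y));
  return 0;
}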
5864bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond,
5865 Register TrueVal, Register FalseVal,
5866 BuildFnTy &MatchInfo) {
5867 // Match: select (fcmp cond x, y) x, y
5868 // select (fcmp cond x, y) y, x
5869 // And turn it into fminnum/fmaxnum or fminimum/fmaximum based on the condition.
5870 LLT DstTy = MRI.getType(Dst);
5871 // Bail out early on pointers, since we'll never want to fold to a min/max.
5872 if (DstTy.isPointer())
5873 return false;
5874 // Match a floating point compare with a less-than/greater-than predicate.
5875 // TODO: Allow multiple users of the compare if they are all selects.
5876 CmpInst::Predicate Pred;
5877 Register CmpLHS, CmpRHS;
5878 if (!mi_match(Cond, MRI,
5879 m_OneNonDBGUse(
5880 m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) ||
5881 CmpInst::isEquality(Pred))
5882 return false;
5883 SelectPatternNaNBehaviour ResWithKnownNaNInfo =
5884 computeRetValAgainstNaN(CmpLHS, CmpRHS, CmpInst::isOrdered(Pred));
5885 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE)
5886 return false;
5887 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5888 std::swap(CmpLHS, CmpRHS);
5889 Pred = CmpInst::getSwappedPredicate(Pred);
5890 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN)
5891 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER;
5892 else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER)
5893 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN;
5894 }
5895 if (TrueVal != CmpLHS || FalseVal != CmpRHS)
5896 return false;
5897 // Decide what type of max/min this should be based on the predicate.
5898 unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, ResWithKnownNaNInfo);
5899 if (!Opc || !isLegal({Opc, {DstTy}}))
5900 return false;
5901 // Comparisons between signed zero and zero may have different results...
5902 // unless we have fmaximum/fminimum. In that case, we know -0 < 0.
5903 if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) {
5904 // We don't know if a comparison between two 0s will give us a consistent
5905 // result. Be conservative and only proceed if at least one side is
5906 // non-zero.
5907 auto KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpLHS, MRI);
5908 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) {
5909 KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpRHS, MRI);
5910 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero())
5911 return false;
5912 }
5913 }
5914 MatchInfo = [=](MachineIRBuilder &B) {
5915 B.buildInstr(Opc, {Dst}, {CmpLHS, CmpRHS});
5916 };
5917 return true;
5918}
5919
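For inputs that are neither NaN nor a mixed-sign zero pair (exactly the cases the guards above exclude), the matched shape select (fcmp olt x, y), x, y behaves like fminnum. A one-case standalone sketch (not part of CombinerHelper.cpp) using std::fmin as the stand-in:

// Standalone sketch of the select-to-fmin equivalence for benign inputs.
#include <cassert>
#include <cmath>

int main() {
  double x = -2.5, y = 4.0;
  double AsSelect = (x < y) ? x : y; // select (fcmp olt x, y), x, y
  double AsMin = std::fmin(x, y);    // G_FMINNUM-like result
  assert(AsSelect == AsMin);
  return 0;
}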
5920bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI,
5921 BuildFnTy &MatchInfo) {
5922 // TODO: Handle integer cases.
5923 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
5924 // Condition may be fed by a truncated compare.
5925 Register Cond = MI.getOperand(1).getReg();
5926 Register MaybeTrunc;
5927 if (mi_match(Cond, MRI, m_OneNonDBGUse(m_GTrunc(m_Reg(MaybeTrunc)))))
5928 Cond = MaybeTrunc;
5929 Register Dst = MI.getOperand(0).getReg();
5930 Register TrueVal = MI.getOperand(2).getReg();
5931 Register FalseVal = MI.getOperand(3).getReg();
5932 return matchFPSelectToMinMax(Dst, Cond, TrueVal, FalseVal, MatchInfo);
5933}
5934
5935bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI,
5936 BuildFnTy &MatchInfo) {
5937 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
5938 // (X + Y) == X --> Y == 0
5939 // (X + Y) != X --> Y != 0
5940 // (X - Y) == X --> Y == 0
5941 // (X - Y) != X --> Y != 0
5942 // (X ^ Y) == X --> Y == 0
5943 // (X ^ Y) != X --> Y != 0
5944 Register Dst = MI.getOperand(0).getReg();
5945 CmpInst::Predicate Pred;
5946 Register X, Y, OpLHS, OpRHS;
5947 bool MatchedSub = mi_match(
5948 Dst, MRI,
5949 m_c_GICmp(m_Pred(Pred), m_Reg(X), m_GSub(m_Reg(OpLHS), m_Reg(Y))));
5950 if (MatchedSub && X != OpLHS)
5951 return false;
5952 if (!MatchedSub) {
5953 if (!mi_match(Dst, MRI,
5954 m_c_GICmp(m_Pred(Pred), m_Reg(X),
5955 m_any_of(m_GAdd(m_Reg(OpLHS), m_Reg(OpRHS)),
5956 m_GXor(m_Reg(OpLHS), m_Reg(OpRHS))))))
5957 return false;
5958 Y = X == OpLHS ? OpRHS : X == OpRHS ? OpLHS : Register();
5959 }
5960 MatchInfo = [=](MachineIRBuilder &B) {
5961 auto Zero = B.buildConstant(MRI.getType(Y), 0);
5962 B.buildICmp(Pred, Dst, Y, Zero);
5963 };
5964 return CmpInst::isEquality(Pred) && Y.isValid();
5965}
5966
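The six rewrites in the comment reduce to one fact about fixed-width integers: adding, subtracting, or xor-ing Y leaves X unchanged iff Y is zero. An exhaustive standalone check over all 8-bit pairs (not part of CombinerHelper.cpp):

// Exhaustive standalone check of the redundant-binop equalities.
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned xi = 0; xi < 256; ++xi)
    for (unsigned yi = 0; yi < 256; ++yi) {
      uint8_t X = static_cast<uint8_t>(xi), Y = static_cast<uint8_t>(yi);
      assert((static_cast<uint8_t>(X + Y) == X) == (Y == 0)); // add
      assert((static_cast<uint8_t>(X - Y) == X) == (Y == 0)); // sub
      assert((static_cast<uint8_t>(X ^ Y) == X) == (Y == 0)); // xor
    }
  return 0;
}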
5967bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) {
5968 Register ShiftReg = MI.getOperand(2).getReg();
5969 LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
5970 auto IsShiftTooBig = [&](const Constant *C) {
5971 auto *CI = dyn_cast<ConstantInt>(C);
5972 return CI && CI->uge(ResTy.getScalarSizeInBits());
5973 };
5974 return matchUnaryPredicate(MRI, ShiftReg, IsShiftTooBig);
5975}
5976
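A trivial standalone model of the predicate (not part of CombinerHelper.cpp): a constant shift amount is "too big" once it reaches the scalar width of the result, mirroring the CI->uge(ResTy.getScalarSizeInBits()) test with hypothetical values:

// Standalone model of the out-of-range shift check.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned ScalarSizeInBits = 32; // e.g. an s32 shift result
  uint64_t ShiftAmt = 35;               // hypothetical constant shift operand
  // Amounts >= the bit width are out of range (poison in GMIR), so the
  // shift can be replaced with undef.
  bool IsShiftTooBig = ShiftAmt >= ScalarSizeInBits;
  assert(IsShiftTooBig);
  return 0;
}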
5977bool CombinerHelper::tryCombine(MachineInstr &MI) {
5978 if (tryCombineCopy(MI))
5979 return true;
5980 if (tryCombineExtendingLoads(MI))
5981 return true;
5982 if (tryCombineIndexedLoadStore(MI))
5983 return true;
5984 return false;
5985}