File: build/source/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
Warning: line 4893, column 16: 1st function call argument is an uninitialized value
//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
#include <optional>
#include <tuple>

#define DEBUG_TYPE "gi-combiner"

using namespace llvm;
using namespace MIPatternMatch;

// Option to allow testing of the combiner while no targets know about indexed
// addressing.
static cl::opt<bool>
    ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));

CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B, bool IsPreLegalize,
                               GISelKnownBits *KB, MachineDominatorTree *MDT,
                               const LegalizerInfo *LI)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
      MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
  (void)this->KB;
}

const TargetLowering &CombinerHelper::getTargetLowering() const {
  return *Builder.getMF().getSubtarget().getTargetLowering();
}

/// \returns The little endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 0
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return I;
}

/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
  auto &MRI = *MIB.getMRI();
  LLT Ty = MRI.getType(V);
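  // E.g. for a 32-bit V = 16 (0b10000), ctlz(V) = 27 and 31 - 27 = 4 = log2(16).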
  auto Ctlz = MIB.buildCTLZ(Ty, V);
  auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
  return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
}

/// \returns The big endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 3
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
}

/// Given a map from byte offsets in memory to indices in a load/store,
/// determine if that map corresponds to a little or big endian byte pattern.
///
/// \param MemOffset2Idx maps memory offsets to address offsets.
/// \param LowestIdx is the lowest index in \p MemOffset2Idx.
///
/// \returns true if the map corresponds to a big endian byte pattern, false if
/// it corresponds to a little endian byte pattern, and std::nullopt otherwise.
///
/// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
/// are as follows:
///
/// AddrOffset   Little endian    Big endian
/// 0            0                3
/// 1            1                2
/// 2            2                1
/// 3            3                0
static std::optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
            int64_t LowestIdx) {
  // Need at least two byte positions to decide on endianness.
  unsigned Width = MemOffset2Idx.size();
  if (Width < 2)
    return std::nullopt;
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
      return std::nullopt;
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
    BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
    if (!BigEndian && !LittleEndian)
      return std::nullopt;
  }

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  return BigEndian;
}

bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; }

bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
  assert(LI && "Must have LegalizerInfo to query isLegal!");
  return LI->getAction(Query).Action == LegalizeActions::Legal;
}

bool CombinerHelper::isLegalOrBeforeLegalizer(
    const LegalityQuery &Query) const {
  return isPreLegalize() || isLegal(Query);
}

bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
  if (!Ty.isVector())
    return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
  // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
  if (isPreLegalize())
    return true;
  LLT EltTy = Ty.getElementType();
  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
}

void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                                    Register ToReg) const {
  Observer.changingAllUsesOfReg(MRI, FromReg);

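  // If ToReg's constraints can absorb FromReg's, rewrite all uses directly;
  // otherwise bridge the two registers with a COPY.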
  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(ToReg, FromReg);

  Observer.finishedChangingAllUsesOfReg();
}

void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      Register ToReg) const {
  assert(FromRegOp.getParent() && "Expected an operand in an MI");
  Observer.changingInstr(*FromRegOp.getParent());

  FromRegOp.setReg(ToReg);

  Observer.changedInstr(*FromRegOp.getParent());
}

void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
                                       unsigned ToOpcode) const {
  Observer.changingInstr(FromMI);

  FromMI.setDesc(Builder.getTII().get(ToOpcode));

  Observer.changedInstr(FromMI);
}

const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
  return RBI->getRegBank(Reg, MRI, *TRI);
}

void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
  if (RegBank)
    MRI.setRegBank(Reg, *RegBank);
}

bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (matchCombineCopy(MI)) {
    applyCombineCopy(MI);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  return canReplaceReg(DstReg, SrcReg, MRI);
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, SrcReg);
}

bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
  bool IsUndef = false;
  SmallVector<Register, 4> Ops;
  if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
    applyCombineConcatVectors(MI, IsUndef, Ops);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                               SmallVectorImpl<Register> &Ops) {
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");
  IsUndef = true;
  MachineInstr *Undef = nullptr;

  // Walk over all the operands of concat vectors and check if they are
  // build_vector themselves or undef.
  // Then collect their operands in Ops.
  for (const MachineOperand &MO : MI.uses()) {
    Register Reg = MO.getReg();
    MachineInstr *Def = MRI.getVRegDef(Reg);
    assert(Def && "Operand not defined");
    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
      IsUndef = false;
      // Remember the operands of the build_vector to fold
      // them into the yet-to-build flattened concat vectors.
      for (const MachineOperand &BuildVecMO : Def->uses())
        Ops.push_back(BuildVecMO.getReg());
      break;
    case TargetOpcode::G_IMPLICIT_DEF: {
      LLT OpType = MRI.getType(Reg);
      // Keep one undef value for all the undef operands.
      if (!Undef) {
        Builder.setInsertPt(*MI.getParent(), MI);
        Undef = Builder.buildUndef(OpType.getScalarType());
      }
      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
                 OpType.getScalarType() &&
             "All undefs should have the same type");
      // Break the undef vector in as many scalar elements as needed
      // for the flattening.
      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());
      break;
    }
    default:
      return false;
    }
  }
  return true;
}
void CombinerHelper::applyCombineConcatVectors(
    MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
  // Generate the flattened build_vector.
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
  if (IsUndef)
    Builder.buildUndef(NewDstReg);
  else
    Builder.buildBuildVector(NewDstReg, Ops);
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, NewDstReg);
}

bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
  SmallVector<Register, 4> Ops;
  if (matchCombineShuffleVector(MI, Ops)) {
    applyCombineShuffleVector(MI, Ops);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
                                               SmallVectorImpl<Register> &Ops) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Src1 = MI.getOperand(1).getReg();
  LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
  unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
  unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;

  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector with a concat_vectors.
  //
  // Note: We may still be able to produce a concat_vectors fed by
  // extract_vector_elt and so on. It is less clear that would
  // be better though, so don't bother for now.
  //
  // If the destination is a scalar, the size of the sources doesn't
  // matter. We will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match
  // we could still emit an extract vector element in that case.
  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
    return false;

  // Check that the shuffle mask can be broken evenly between the
  // different sources.
  if (DstNumElts % SrcNumElts != 0)
    return false;

  // Mask length is a multiple of the source vector length.
  // Check if the shuffle is some kind of concatenation of the input
  // vectors.
  unsigned NumConcat = DstNumElts / SrcNumElts;
  SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
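  // E.g. with two <4 x s32> sources, the mask <0,1,2,3,4,5,6,7> concatenates
  // Src1 then Src2, while <4,5,6,7,0,1,2,3> concatenates Src2 then Src1.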
  for (unsigned i = 0; i != DstNumElts; ++i) {
    int Idx = Mask[i];
    // Undef value.
    if (Idx < 0)
      continue;
    // Ensure the indices in each SrcType sized piece are sequential and that
    // the same source is used for the whole piece.
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
      return false;
    // Remember which source this index came from.
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }

  // The shuffle is concatenating multiple vectors together.
  // Collect the different operands for that.
  Register UndefReg;
  Register Src2 = MI.getOperand(2).getReg();
  for (auto Src : ConcatSrcs) {
    if (Src < 0) {
      if (!UndefReg) {
        Builder.setInsertPt(*MI.getParent(), MI);
        UndefReg = Builder.buildUndef(SrcType).getReg(0);
      }
      Ops.push_back(UndefReg);
    } else if (Src == 0)
      Ops.push_back(Src1);
    else
      Ops.push_back(Src2);
  }
  return true;
}

void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
                                               const ArrayRef<Register> Ops) {
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  if (Ops.size() == 1)
    Builder.buildCopy(NewDstReg, Ops[0]);
  else
    Builder.buildMergeLikeInstr(NewDstReg, Ops);

  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, NewDstReg);
}

namespace {

/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,
                                  PreferredTuple &CurrentUse,
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
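  // No preference yet: take the candidate only if it matches the extension the
  // load itself already performs, or if the running preference is just the
  // G_ANYEXT placeholder used for plain loads.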
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }

  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive. Don't do this if the load is already a zero-extend load
  // though, otherwise we'll rewrite a zero-extend load into a sign-extend
  // later.
  if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}

/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHI's is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases may
/// want to try harder to find a dominating block.
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
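    // PHI operands come in (value, block) pairs, so the incoming block is the
    // operand immediately after UseMO.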
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just after
  // the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt), UseMO);
    return;
  }

  // Otherwise we want the start of the BB
  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
}
} // end anonymous namespace

bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
  PreferredTuple Preferred;
  if (matchCombineExtendingLoads(MI, Preferred)) {
    applyCombineExtendingLoads(MI, Preferred);
    return true;
  }
  return false;
}

static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) {
  unsigned CandidateLoadOpc;
  switch (ExtOpc) {
  case TargetOpcode::G_ANYEXT:
    CandidateLoadOpc = TargetOpcode::G_LOAD;
    break;
  case TargetOpcode::G_SEXT:
    CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXT:
    CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
    break;
  default:
    llvm_unreachable("Unexpected extend opc");
  }
  return CandidateLoadOpc;
}

bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or just
  // for performance.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
  if (!LoadMI)
    return false;

  Register LoadReg = LoadMI->getDstReg();

  LLT LoadValueTy = MRI.getType(LoadReg);
  if (!LoadValueTy.isScalar())
    return false;

  // Most architectures are going to legalize <s8 loads into at least a 1 byte
  // load, and the MMOs can only describe memory accesses in multiples of bytes.
  // If we try to perform extload combining on those, we can end up with
  //  %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
  if (LoadValueTy.getSizeInBits() < 8)
    return false;

  // For non power-of-2 types, they will very likely be legalized into multiple
  // loads. Don't bother trying to match them into extending loads.
  if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
    return false;

  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
  unsigned PreferredOpcode =
      isa<GLoad>(&MI)
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};
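  // Walk the uses of the loaded value and narrow the preference down to a
  // single legal extend, using ChoosePreferredUse's heuristics to rank them.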
  for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
      const auto &MMO = LoadMI->getMMO();
      // For atomics, only form anyextending loads.
      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
        continue;
      // Check for legality.
      if (!isPreLegalize()) {
        LegalityQuery::MemDesc MMDesc(MMO);
        unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode());
        LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
        LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
        if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
                .Action != LegalizeActions::Legal)
          continue;
      }
      Preferred = ChoosePreferredUse(MI, Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
  return true;
}

void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // Rewrite the load to the chosen extending load.
  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();

  // Inserter to insert a truncate back to the original type at a given point
  // with some basic CSE to limit truncate duplication to one per BB.
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
                           MachineBasicBlock::iterator InsertBefore,
                           MachineOperand &UseMO) {
    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      Observer.changingInstr(*UseMO.getParent());
      UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO.getParent());
      return;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, UseMO, NewDstReg);
  };

  Observer.changingInstr(MI);
  unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode);
  MI.setDesc(Builder.getTII().get(LoadOpc));

  // Rewrite all the uses to fix up the types.
  auto &LoadValue = MI.getOperand(0);
  SmallVector<MachineOperand *, 4> Uses;
  for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
    Uses.push_back(&UseMO);

  for (auto *UseMO : Uses) {
    MachineInstr *UseMI = UseMO->getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      Register UseDstReg = UseMI->getOperand(0).getReg();
      MachineOperand &UseSrcMO = UseMI->getOperand(1);
      const LLT UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ANYEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   ... = ... %2(s32)
          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
          Observer.erasingInstr(*UseMO->getParent());
          UseMO->getParent()->eraseFromParent();
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s64) = G_ANYEXT %1(s8)
          //   ... = ... %3(s64)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   %3:_(s64) = G_ANYEXT %2:_(s32)
          //   ... = ... %3(s64)
          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s64) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ZEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s64) = G_SEXTLOAD ...
          //   %4:_(s8) = G_TRUNC %2:_(s64)
          //   %3:_(s32) = G_ZEXT %4:_(s8)
          //   ... = ... %3(s32)
          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
                                                 InsertTruncAt);
        }
        continue;
      }
      // The use is (one of) the uses of the preferred use we chose earlier.
      // We're going to update the load to def this value later so just erase
      // the old extend.
      Observer.erasingInstr(*UseMO->getParent());
      UseMO->getParent()->eraseFromParent();
      continue;
    }

    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
  }

  MI.getOperand(0).setReg(ChosenDstReg);
  Observer.changedInstr(MI);
}

bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
                                                 BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  // If we have the following code:
  //  %mask = G_CONSTANT 255
  //  %ld = G_LOAD %ptr, (load s16)
  //  %and = G_AND %ld, %mask
  //
  // Try to fold it into
  //   %ld = G_ZEXTLOAD %ptr, (load s8)

  Register Dst = MI.getOperand(0).getReg();
  if (MRI.getType(Dst).isVector())
    return false;

  auto MaybeMask =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeMask)
    return false;

  APInt MaskVal = MaybeMask->Value;

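  // Only masks of the form 2^n - 1 (all trailing ones) correspond to
  // zero-extending the low n bits.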
  if (!MaskVal.isMask())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  // Don't use getOpcodeDef() here since intermediate instructions may have
  // multiple users.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
    return false;

  Register LoadReg = LoadMI->getDstReg();
  LLT RegTy = MRI.getType(LoadReg);
  Register PtrReg = LoadMI->getPointerReg();
  unsigned RegSize = RegTy.getSizeInBits();
  uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
  unsigned MaskSizeBits = MaskVal.countr_one();

  // The mask may not be larger than the in-memory type, as it might cover sign
  // extended bits
  if (MaskSizeBits > LoadSizeBits)
    return false;

  // If the mask covers the whole destination register, there's nothing to
  // extend
  if (MaskSizeBits >= RegSize)
    return false;

  // Most targets cannot deal with loads of size < 8 and need to re-legalize to
  // at least byte loads. Avoid creating such loads here
  if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
    return false;

  const MachineMemOperand &MMO = LoadMI->getMMO();
  LegalityQuery::MemDesc MemDesc(MMO);

  // Don't modify the memory access size if this is atomic/volatile, but we can
  // still adjust the opcode to indicate the high bit behavior.
  if (LoadMI->isSimple())
    MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
    return false;

  // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();
    auto PtrInfo = MMO.getPointerInfo();
    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
    LoadMI->eraseFromParent();
  };
  return true;
}

bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
                                   const MachineInstr &UseMI) {
  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
         "shouldn't consider debug uses");
  assert(DefMI.getParent() == UseMI.getParent());
  if (&DefMI == &UseMI)
    return true;
  const MachineBasicBlock &MBB = *DefMI.getParent();
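  // Scan the block from the top; whichever of the two instructions appears
  // first is the predecessor.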
  auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
    return &MI == &DefMI || &MI == &UseMI;
  });
  if (DefOrUse == MBB.end())
    llvm_unreachable("Block must contain both DefMI and UseMI!");
  return &*DefOrUse == &DefMI;
}

bool CombinerHelper::dominates(const MachineInstr &DefMI,
                               const MachineInstr &UseMI) {
  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
         "shouldn't consider debug uses");
  if (MDT)
    return MDT->dominates(&DefMI, &UseMI);
  else if (DefMI.getParent() != UseMI.getParent())
    return false;

  return isPredecessor(DefMI, UseMI);
}

bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register SrcReg = MI.getOperand(1).getReg();
  Register LoadUser = SrcReg;

  if (MRI.getType(SrcReg).isVector())
    return false;

  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
    LoadUser = TruncSrc;

  uint64_t SizeInBits = MI.getOperand(2).getImm();
  // If the source is a G_SEXTLOAD from the same bit width, then we don't
  // need any extend at all, just a truncate.
  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    // If truncating more than the original extended value, abort.
    auto LoadSizeBits = LoadMI->getMemSizeInBits();
    if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
      return false;
    if (LoadSizeBits == SizeInBits)
      return true;
  }
  return false;
}

void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Builder.setInstrAndDebugLoc(MI);
  Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
  MI.eraseFromParent();
}

bool CombinerHelper::matchSextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  Register DstReg = MI.getOperand(0).getReg();
  LLT RegTy = MRI.getType(DstReg);

  // Only supports scalars for now.
  if (RegTy.isVector())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
  if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
    return false;

  uint64_t MemBits = LoadDef->getMemSizeInBits();

  // If the sign extend extends from a narrower width than the load's width,
  // then we can narrow the load width when we combine to a G_SEXTLOAD.
  // Avoid widening the load at all.
  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);

  // Don't generate G_SEXTLOADs with a < 1 byte width.
  if (NewSizeBits < 8)
    return false;
  // Don't bother creating a non-power-2 sextload, it will likely be broken up
  // anyway for most targets.
  if (!isPowerOf2_32(NewSizeBits))
    return false;

  const MachineMemOperand &MMO = LoadDef->getMMO();
  LegalityQuery::MemDesc MMDesc(MMO);

  // Don't modify the memory access size if this is atomic/volatile, but we can
  // still adjust the opcode to indicate the high bit behavior.
  if (LoadDef->isSimple())
    MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
    return false;

  // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
                                 {MRI.getType(LoadDef->getDstReg()),
                                  MRI.getType(LoadDef->getPointerReg())},
                                 {MMDesc}}))
    return false;

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
  return true;
}

void CombinerHelper::applySextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register LoadReg;
  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
  GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));

  // If we have the following:
  //   %ld = G_LOAD %ptr, (load 2)
  //   %ext = G_SEXT_INREG %ld, 8
  // ==>
  //   %ld = G_SEXTLOAD %ptr (load 1)

  auto &MMO = LoadDef->getMMO();
  Builder.setInstrAndDebugLoc(*LoadDef);
  auto &MF = Builder.getMF();
  auto PtrInfo = MMO.getPointerInfo();
  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
  Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
                         LoadDef->getPointerReg(), *NewMMO);
  MI.eraseFromParent();
}

bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
                                            Register &Base, Register &Offset) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

#ifndef NDEBUG
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
#endif

  Base = MI.getOperand(1).getReg();
  MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
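  // The writeback of an indexed op can't usefully replace a frame index value
  // without an extra copy, so don't look for candidates with such a base.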
  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
    return false;

  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
  for (auto &Use : MRI.use_nodbg_instructions(Base)) {
    if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
      continue;

    Offset = Use.getOperand(2).getReg();
    if (!ForceLegalIndexing &&
        !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
      LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
                        << Use);
      continue;
    }

    // Make sure the offset calculation is before the potentially indexed op.
    // FIXME: we really care about dependency here. The offset calculation might
    // be movable.
    MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
    if (!OffsetDef || !dominates(*OffsetDef, MI)) {
      LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
                        << Use);
      continue;
    }

    // FIXME: check whether all uses of Base are load/store with foldable
    // addressing modes. If so, using the normal addr-modes is better than
    // forming an indexed one.

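    // Every user of the incremented pointer will be rewritten to use the
    // writeback def of the indexed op, so the memop must dominate them all.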
    bool MemOpDominatesAddrUses = true;
    for (auto &PtrAddUse :
         MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
      if (!dominates(MI, PtrAddUse)) {
        MemOpDominatesAddrUses = false;
        break;
      }
    }

    if (!MemOpDominatesAddrUses) {
      LLVM_DEBUG(
          dbgs() << "    Ignoring candidate as memop does not dominate uses: "
                 << Use);
      continue;
    }

    LLVM_DEBUG(dbgs() << "    Found match: " << Use);
    Addr = Use.getOperand(0).getReg();
    return true;
  }

  return false;
}

bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
                                           Register &Base, Register &Offset) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

#ifndef NDEBUG
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
#endif

  Addr = MI.getOperand(1).getReg();
  MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
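  // If this memop is the address's only user, the writeback value would be
  // dead, so there is nothing to gain from a pre-indexed form.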
994 | if (!AddrDef || MRI.hasOneNonDBGUse(Addr)) | |||
995 | return false; | |||
996 | ||||
997 | Base = AddrDef->getOperand(1).getReg(); | |||
998 | Offset = AddrDef->getOperand(2).getReg(); | |||
999 | ||||
1000 | LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("gi-combiner")) { dbgs() << "Found potential pre-indexed load_store: " << MI; } } while (false); | |||
1001 | ||||
1002 | if (!ForceLegalIndexing && | |||
1003 | !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) { | |||
1004 | LLVM_DEBUG(dbgs() << " Skipping, not legal for target")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("gi-combiner")) { dbgs() << " Skipping, not legal for target" ; } } while (false); | |||
1005 | return false; | |||
1006 | } | |||
1007 | ||||
1008 | MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI); | |||
1009 | if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { | |||
1010 | LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("gi-combiner")) { dbgs() << " Skipping, frame index would need copy anyway." ; } } while (false); | |||
1011 | return false; | |||
1012 | } | |||
1013 | ||||
1014 | if (MI.getOpcode() == TargetOpcode::G_STORE) { | |||
1015 | // Would require a copy. | |||
1016 | if (Base == MI.getOperand(0).getReg()) { | |||
1017 | LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("gi-combiner")) { dbgs() << " Skipping, storing base so need copy anyway." ; } } while (false); | |||
1018 | return false; | |||
1019 | } | |||
1020 | ||||
1021 | // We're expecting one use of Addr in MI, but it could also be the | |||
1022 | // value stored, which isn't actually dominated by the instruction. | |||
1023 | if (MI.getOperand(0).getReg() == Addr) { | |||
1024 | LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("gi-combiner")) { dbgs() << " Skipping, does not dominate all addr uses" ; } } while (false); | |||
1025 | return false; | |||
1026 | } | |||
1027 | } | |||
1028 | ||||
1029 | // FIXME: check whether all uses of the base pointer are constant PtrAdds. | |||
1030 | // That might allow us to end base's liveness here by adjusting the constant. | |||
1031 | ||||
1032 | for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) { | |||
1033 | if (!dominates(MI, UseMI)) { | |||
1034 | LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("gi-combiner")) { dbgs() << " Skipping, does not dominate all addr uses." ; } } while (false); | |||
1035 | return false; | |||
1036 | } | |||
1037 | } | |||
1038 | ||||
1039 | return true; | |||
1040 | } | |||
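
// For illustration, the shape of candidate this accepts looks like the
// following MIR (register names hypothetical):
//   %addr:_(p0) = G_PTR_ADD %base, %offset
//   G_STORE %val(s32), %addr(p0)
// with every other use of %addr dominated by the memory operation, so the
// writeback value of an indexed op can stand in for the G_PTR_ADD.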

bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
  IndexedLoadStoreMatchInfo MatchInfo;
  if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
    applyCombineIndexedLoadStore(MI, MatchInfo);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
      Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
    return false;

  // For now, no targets actually support these opcodes so don't waste time
  // running these unless we're forced to for testing.
  if (!ForceLegalIndexing)
    return false;

  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                                          MatchInfo.Offset);
  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                              MatchInfo.Offset))
    return false;

  return true;
}

void CombinerHelper::applyCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
  MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
  MachineIRBuilder MIRBuilder(MI);
  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;
  unsigned NewOpcode;
  switch (Opcode) {
  case TargetOpcode::G_LOAD:
    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
    break;
  case TargetOpcode::G_SEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
    break;
  case TargetOpcode::G_STORE:
    NewOpcode = TargetOpcode::G_INDEXED_STORE;
    break;
  default:
    llvm_unreachable("Unknown load/store opcode");
  }

  auto MIB = MIRBuilder.buildInstr(NewOpcode);
  if (IsStore) {
    MIB.addDef(MatchInfo.Addr);
    MIB.addUse(MI.getOperand(0).getReg());
  } else {
    MIB.addDef(MI.getOperand(0).getReg());
    MIB.addDef(MatchInfo.Addr);
  }

  MIB.addUse(MatchInfo.Base);
  MIB.addUse(MatchInfo.Offset);
  MIB.addImm(MatchInfo.IsPre);
  MI.eraseFromParent();
  AddrDef.eraseFromParent();

  LLVM_DEBUG(dbgs() << " Combined to indexed operation");
}
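
// For illustration, the pre-indexed store case rewrites (register names
// hypothetical):
//   %addr:_(p0) = G_PTR_ADD %base, %offset
//   G_STORE %val(s32), %addr(p0)
// into:
//   %addr:_(p0) = G_INDEXED_STORE %val(s32), %base, %offset, 1
// where the trailing immediate encodes IsPre, and the original G_PTR_ADD is
// erased since the indexed op now defines the writeback address.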

bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
                                        MachineInstr *&OtherMI) {
  unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsDiv = true;
    IsSigned = Opcode == TargetOpcode::G_SDIV;
    break;
  }
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsDiv = false;
    IsSigned = Opcode == TargetOpcode::G_SREM;
    break;
  }
  }

  Register Src1 = MI.getOperand(1).getReg();
  unsigned DivOpcode, RemOpcode, DivremOpcode;
  if (IsSigned) {
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;
  } else {
    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;
  }

  if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
    return false;

  // Combine:
  // %div:_ = G_[SU]DIV %src1:_, %src2:_
  // %rem:_ = G_[SU]REM %src1:_, %src2:_
  // into:
  // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_

  // Combine:
  // %rem:_ = G_[SU]REM %src1:_, %src2:_
  // %div:_ = G_[SU]DIV %src1:_, %src2:_
  // into:
  // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_

  for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
        matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
        matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
      OtherMI = &UseMI;
      return true;
    }
  }

  return false;
}

void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
                                        MachineInstr *&OtherMI) {
  unsigned Opcode = MI.getOpcode();
  assert(OtherMI && "OtherMI shouldn't be empty.");

  Register DestDivReg, DestRemReg;
  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();
    DestRemReg = OtherMI->getOperand(0).getReg();
  } else {
    DestDivReg = OtherMI->getOperand(0).getReg();
    DestRemReg = MI.getOperand(0).getReg();
  }

  bool IsSigned =
      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;

  // Check which instruction is first in the block so we don't break def-use
  // deps by "moving" the instruction incorrectly.
  if (dominates(MI, *OtherMI))
    Builder.setInstrAndDebugLoc(MI);
  else
    Builder.setInstrAndDebugLoc(*OtherMI);

  Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
                              : TargetOpcode::G_UDIVREM,
                     {DestDivReg, DestRemReg},
                     {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
  MI.eraseFromParent();
  OtherMI->eraseFromParent();
}

bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
                                                   MachineInstr *&BrCond) {
  assert(MI.getOpcode() == TargetOpcode::G_BR);

  // Try to match the following:
  // bb1:
  //   G_BRCOND %c1, %bb2
  //   G_BR %bb3
  // bb2:
  // ...
  // bb3:

  // The above pattern does not have a fall through to the successor bb2,
  // always resulting in a branch no matter which path is taken. Here we try
  // to find and replace that pattern with a conditional branch to bb3,
  // falling through to bb2 otherwise. This is generally better for branch
  // predictors.

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator BrIt(MI);
  if (BrIt == MBB->begin())
    return false;
  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  // Check that the next block is the conditional branch target. Also make
  // sure that it isn't the same as the G_BR's target (otherwise, this will
  // loop.)
  MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
  return BrCondTarget != MI.getOperand(0).getMBB() &&
         MBB->isLayoutSuccessor(BrCondTarget);
}

void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
                                                   MachineInstr *&BrCond) {
  MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
  Builder.setInstrAndDebugLoc(*BrCond);
  LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
  // FIXME: Does int/fp matter for this? If so, we might need to restrict
  // this to i1 only since we might not know for sure what kind of
  // compare generated the condition value.
  auto True = Builder.buildConstant(
      Ty, getICmpTrueVal(getTargetLowering(), false, false));
  auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);

  auto *FallthroughBB = BrCond->getOperand(1).getMBB();
  Observer.changingInstr(MI);
  MI.getOperand(0).setMBB(FallthroughBB);
  Observer.changedInstr(MI);

  // Change the conditional branch to use the inverted condition and
  // new target block.
  Observer.changingInstr(*BrCond);
  BrCond->getOperand(0).setReg(Xor.getReg(0));
  BrCond->getOperand(1).setMBB(BrTarget);
  Observer.changedInstr(*BrCond);
}
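
// For illustration, assuming getICmpTrueVal returns 1 for this target, bb1
// from the matched pattern becomes:
//   %t:_(s1) = G_CONSTANT i1 1
//   %inv:_(s1) = G_XOR %c1, %t
//   G_BRCOND %inv, %bb3
//   G_BR %bb2
// The unconditional branch now targets the layout successor bb2 and can later
// be treated as a fallthrough.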

static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                                Ty.getNumElements());
  return IntegerType::get(C, Ty.getSizeInBits());
}
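
// e.g. LLT::scalar(64) maps to i64, and LLT::fixed_vector(4, 32) maps to
// <4 x i32>.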

bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
  return Helper.lowerMemcpyInline(MI) ==
         LegalizerHelper::LegalizeResult::Legalized;
}

bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
  return Helper.lowerMemCpyFamily(MI, MaxLen) ==
         LegalizerHelper::LegalizeResult::Legalized;
}

static std::optional<APFloat>
constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op,
                    const MachineRegisterInfo &MRI) {
  const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
  if (!MaybeCst)
    return std::nullopt;

  APFloat V = MaybeCst->getValueAPF();
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_FNEG: {
    V.changeSign();
    return V;
  }
  case TargetOpcode::G_FABS: {
    V.clearSign();
    return V;
  }
  case TargetOpcode::G_FPTRUNC:
    break;
  case TargetOpcode::G_FSQRT: {
    bool Unused;
    V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
    V = APFloat(sqrt(V.convertToDouble()));
    break;
  }
  case TargetOpcode::G_FLOG2: {
    bool Unused;
    V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
    V = APFloat(log2(V.convertToDouble()));
    break;
  }
  }
  // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
  // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
  // and `G_FLOG2` reach here.
  bool Unused;
  V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
  return V;
}
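
// e.g. G_FNEG of a float constant 1.5 folds directly to -1.5, while G_FSQRT
// of 4.0 is evaluated as sqrt(4.0) == 2.0 in IEEE double and then converted
// back to the destination semantics before being returned.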

bool CombinerHelper::matchCombineConstantFoldFpUnary(
    MachineInstr &MI, std::optional<APFloat> &Cst) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
  return Cst.has_value();
}

void CombinerHelper::applyCombineConstantFoldFpUnary(
    MachineInstr &MI, std::optional<APFloat> &Cst) {
  assert(Cst && "Optional is unexpectedly empty!");
  Builder.setInstrAndDebugLoc(MI);
  MachineFunction &MF = Builder.getMF();
  auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
  Register DstReg = MI.getOperand(0).getReg();
  Builder.buildFConstant(DstReg, *FPVal);
  MI.eraseFromParent();
}

bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
                                           PtrAddChain &MatchInfo) {
  // We're trying to match the following pattern:
  // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
  // %root = G_PTR_ADD %t1, G_CONSTANT imm2
  // -->
  // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)

  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Register Add2 = MI.getOperand(1).getReg();
  Register Imm1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
  if (!MaybeImmVal)
    return false;

  MachineInstr *Add2Def = MRI.getVRegDef(Add2);
  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Register Base = Add2Def->getOperand(1).getReg();
  Register Imm2 = Add2Def->getOperand(2).getReg();
  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
  if (!MaybeImm2Val)
    return false;

  // Check if the new combined immediate forms an illegal addressing mode.
  // Do not combine if it was legal before but would get illegal.
  // To do so, we need to find a load/store user of the pointer to get
  // the access type.
  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();
  for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
      AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
                               MF.getFunction().getContext());
      break;
    }
  }
  TargetLoweringBase::AddrMode AMNew;
  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
  AMNew.BaseOffs = CombinedImm.getSExtValue();
  if (AccessTy) {
    AMNew.HasBaseReg = true;
    TargetLoweringBase::AddrMode AMOld;
    AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
    AMOld.HasBaseReg = true;
    unsigned AS = MRI.getType(Add2).getAddressSpace();
    const auto &TLI = *MF.getSubtarget().getTargetLowering();
    if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
        !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
      return false;
  }

  // Pass the combined immediate to the apply function.
  MatchInfo.Imm = AMNew.BaseOffs;
  MatchInfo.Base = Base;
  MatchInfo.Bank = getRegBank(Imm2);
  return true;
}

void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
                                           PtrAddChain &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
  MachineIRBuilder MIB(MI);
  LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
  auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
  setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
  Observer.changingInstr(MI);
  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
  Observer.changedInstr(MI);
}
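
// Worked example (illustrative register names):
//   %t1:_(p0) = G_PTR_ADD %base, G_CONSTANT 16
//   %root:_(p0) = G_PTR_ADD %t1, G_CONSTANT 8
// becomes, provided an offset of 24 is still a legal addressing mode for the
// access type of any load/store user:
//   %root:_(p0) = G_PTR_ADD %base, G_CONSTANT 24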

bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
                                          RegisterImmPair &MatchInfo) {
  // We're trying to match the following pattern with any of
  // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
  // %t1 = SHIFT %base, G_CONSTANT imm1
  // %root = SHIFT %t1, G_CONSTANT imm2
  // -->
  // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)

  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  Register Shl2 = MI.getOperand(1).getReg();
  Register Imm1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
  if (!MaybeImmVal)
    return false;

  MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
  if (Shl2Def->getOpcode() != Opcode)
    return false;

  Register Base = Shl2Def->getOperand(1).getReg();
  Register Imm2 = Shl2Def->getOperand(2).getReg();
  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
  if (!MaybeImm2Val)
    return false;

  // Pass the combined immediate to the apply function.
  MatchInfo.Imm =
      (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
  MatchInfo.Reg = Base;

  // There is no simple replacement for a saturating unsigned left shift that
  // exceeds the scalar size.
  if (Opcode == TargetOpcode::G_USHLSAT &&
      MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
    return false;

  return true;
}

void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
                                          RegisterImmPair &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  Builder.setInstrAndDebugLoc(MI);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
  auto Imm = MatchInfo.Imm;

  if (Imm >= ScalarSizeInBits) {
    // Any logical shift that exceeds scalar size will produce zero.
    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
      Builder.buildConstant(MI.getOperand(0), 0);
      MI.eraseFromParent();
      return;
    }
    // Arithmetic shift and saturating signed left shift have no effect beyond
    // scalar size.
    Imm = ScalarSizeInBits - 1;
  }

  LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
  Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
  Observer.changingInstr(MI);
  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
  Observer.changedInstr(MI);
}
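
// e.g. for s32, a G_LSHR by 20 of a G_LSHR by 20 has a combined amount of
// 40 >= 32, so the result folds to a constant 0; for G_ASHR the amount is
// instead clamped to 31, which preserves the sign-fill behavior.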

bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
                                              ShiftOfShiftedLogic &MatchInfo) {
  // We're trying to match the following pattern with any of
  // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
  // with any of G_AND/G_OR/G_XOR logic instructions.
  // %t1 = SHIFT %X, G_CONSTANT C0
  // %t2 = LOGIC %t1, %Y
  // %root = SHIFT %t2, G_CONSTANT C1
  // -->
  // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
  // %t4 = SHIFT %Y, G_CONSTANT C1
  // %root = LOGIC %t3, %t4
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  // Match a one-use bitwise logic op.
  Register LogicDest = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(LogicDest))
    return false;

  MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)
    return false;

  // Find a matching one-use shift by constant.
  const Register C1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
  if (!MaybeImmVal)
    return false;

  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

  auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
    // Shift should match previous one and should be a one-use.
    if (MI->getOpcode() != ShiftOpcode ||
        !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
      return false;

    // Must be a constant.
    auto MaybeImmVal =
        getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
    if (!MaybeImmVal)
      return false;

    ShiftVal = MaybeImmVal->Value.getSExtValue();
    return true;
  };

  // Logic ops are commutative, so check each operand for a match.
  Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
  MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
  Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
  MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
  uint64_t C0Val;

  if (matchFirstShift(LogicMIOp1, C0Val)) {
    MatchInfo.LogicNonShiftReg = LogicMIReg2;
    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {
    MatchInfo.LogicNonShiftReg = LogicMIReg1;
    MatchInfo.Shift2 = LogicMIOp2;
  } else
    return false;

  MatchInfo.ValSum = C0Val + C1Val;

  // The fold is not valid if the sum of the shift values exceeds bitwidth.
  if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
    return false;

  MatchInfo.Logic = LogicMI;
  return true;
}

void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
                                              ShiftOfShiftedLogic &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
  LLT DestType = MRI.getType(MI.getOperand(0).getReg());
  Builder.setInstrAndDebugLoc(MI);

  Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);

  Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
  Register Shift1 =
      Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);

  // If LogicNonShiftReg is the same as Shift1Base, and the shift1 constant is
  // the same as the MatchInfo.Shift2 constant, CSEMIRBuilder will reuse the
  // old shift1 when building shift2. If we then erased MatchInfo.Shift2 at
  // the end, we would actually remove the old shift1 and crash later. Erase
  // it earlier to avoid the crash.
  MatchInfo.Shift2->eraseFromParent();

  Register Shift2Const = MI.getOperand(2).getReg();
  Register Shift2 = Builder
                        .buildInstr(Opcode, {DestType},
                                    {MatchInfo.LogicNonShiftReg, Shift2Const})
                        .getReg(0);

  Register Dest = MI.getOperand(0).getReg();
  Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});

  // This was one use so it's safe to remove it.
  MatchInfo.Logic->eraseFromParent();

  MI.eraseFromParent();
}
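
// Worked example (illustrative, s32):
//   %t1 = G_SHL %x, G_CONSTANT 2
//   %t2 = G_AND %t1, %y
//   %root = G_SHL %t2, G_CONSTANT 3
// becomes:
//   %t3 = G_SHL %x, G_CONSTANT 5
//   %t4 = G_SHL %y, G_CONSTANT 3
//   %root = G_AND %t3, %t4
// since (x << 2 & y) << 3 == (x << 5) & (y << 3).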

bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  auto MaybeImmVal =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeImmVal)
    return false;

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);
}

void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  MachineIRBuilder MIB(MI);
  LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
  auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
  Observer.changingInstr(MI);
  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
  MI.getOperand(2).setReg(ShiftCst.getReg(0));
  Observer.changedInstr(MI);
}
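
// e.g. %d:_(s32) = G_MUL %x, 8 becomes %d:_(s32) = G_SHL %x, 3, since 8 is an
// exact power of two (illustrative register names).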

// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
                                             RegisterImmPair &MatchData) {
  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);

  Register LHS = MI.getOperand(1).getReg();

  Register ExtSrc;
  if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
      !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
      !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
    return false;

  // TODO: Should handle vector splat.
  Register RHS = MI.getOperand(2).getReg();
  auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI);
  if (!MaybeShiftAmtVal)
    return false;

  if (LI) {
    LLT SrcTy = MRI.getType(ExtSrc);

    // We only really care about the legality with the shifted value. We can
    // pick any type for the constant shift amount, so ask the target what to
    // use. Otherwise we would have to guess and hope it is reported as legal.
    LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
    if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
      return false;
  }

  int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;

  unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
  return MinLeadingZeros >= ShiftAmt;
}

void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
                                             const RegisterImmPair &MatchData) {
  Register ExtSrcReg = MatchData.Reg;
  int64_t ShiftAmtVal = MatchData.Imm;

  LLT ExtSrcTy = MRI.getType(ExtSrcReg);
  Builder.setInstrAndDebugLoc(MI);
  auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
  auto NarrowShift =
      Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
  Builder.buildZExt(MI.getOperand(0), NarrowShift);
  MI.eraseFromParent();
}
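
// For illustration (register names hypothetical):
//   %e:_(s64) = G_ZEXT %x:_(s32)
//   %d:_(s64) = G_SHL %e, 8
// becomes, when %x is known to have at least 8 leading zero bits:
//   %n:_(s32) = G_SHL %x, 8
//   %d:_(s64) = G_ZEXT %n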

bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
                                              Register &MatchInfo) {
  GMerge &Merge = cast<GMerge>(MI);
  SmallVector<Register, 16> MergedValues;
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)
    MergedValues.emplace_back(Merge.getSourceReg(I));

  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
    return false;

  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))
      return false;

  MatchInfo = Unmerge->getSourceReg();
  return true;
}
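
// e.g.
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %x:_(s64)
//   %y:_(s64) = G_MERGE_VALUES %a, %b
// matches with MatchInfo = %x, so %y can simply be replaced by %x.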

static Register peekThroughBitcast(Register Reg,
                                   const MachineRegisterInfo &MRI) {
  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
    ;

  return Reg;
}

bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
    MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  auto &Unmerge = cast<GUnmerge>(MI);
  Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);

  auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);
  if (!SrcInstr)
    return false;

  // Check the source type of the merge.
  LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
  LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
  bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
  if (SrcMergeTy != Dst0Ty && !SameSize)
    return false;
  // They are the same now (modulo a bitcast).
  // We can collect all the src registers.
  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
    Operands.push_back(SrcInstr->getSourceReg(Idx));
  return true;
}

void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
    MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  assert((MI.getNumOperands() - 1 == Operands.size()) &&
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  LLT SrcTy = MRI.getType(Operands[0]);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  bool CanReuseInputDirectly = DstTy == SrcTy;
  Builder.setInstrAndDebugLoc(MI);
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    Register SrcReg = Operands[Idx];

    // This combine may run after RegBankSelect, so we need to be aware of
    // register banks.
    const auto &DstCB = MRI.getRegClassOrRegBank(DstReg);
    if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) {
      SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0);
      MRI.setRegClassOrRegBank(SrcReg, DstCB);
    }

    if (CanReuseInputDirectly)
      replaceRegWith(MRI, DstReg, SrcReg);
    else
      Builder.buildCast(DstReg, SrcReg);
  }
  MI.eraseFromParent();
}

bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
                                                 SmallVectorImpl<APInt> &Csts) {
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
    return false;
  // Break down the big constant into smaller ones.
  const MachineOperand &CstVal = SrcInstr->getOperand(1);
  APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
                  ? CstVal.getCImm()->getValue()
                  : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();

  LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned ShiftAmt = Dst0Ty.getSizeInBits();
  // Unmerge a constant.
  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
    Csts.emplace_back(Val.trunc(ShiftAmt));
    Val = Val.lshr(ShiftAmt);
  }

  return true;
}

void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
                                                 SmallVectorImpl<APInt> &Csts) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  assert((MI.getNumOperands() - 1 == Csts.size()) &&
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  Builder.setInstrAndDebugLoc(MI);
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    Builder.buildConstant(DstReg, Csts[Idx]);
  }

  MI.eraseFromParent();
}
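
// e.g. unmerging %c:_(s64) = G_CONSTANT i64 0x0000000100000002 into two s32
// pieces yields G_CONSTANT i32 2 for the first destination and G_CONSTANT
// i32 1 for the second, following the trunc/lshr loop in the match above.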

bool CombinerHelper::matchCombineUnmergeUndef(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  MatchInfo = [&MI](MachineIRBuilder &B) {
    unsigned NumElems = MI.getNumOperands() - 1;
    for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
      Register DstReg = MI.getOperand(Idx).getReg();
      B.buildUndef(DstReg);
    }
  };
  return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
}

bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  // Check that all the lanes are dead except the first one.
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
      return false;
  }
  return true;
}

void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
  Builder.setInstrAndDebugLoc(MI);
  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  // Truncating a vector is going to truncate every single lane,
  // whereas we want the full lowbits.
  // Do the operation on a scalar instead.
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.isVector())
    SrcReg =
        Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);

  Register Dst0Reg = MI.getOperand(0).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);
  if (Dst0Ty.isVector()) {
    auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
    Builder.buildCast(Dst0Reg, MIB);
  } else
    Builder.buildTrunc(Dst0Reg, SrcReg);
  MI.eraseFromParent();
}
1872 | ||||
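// Illustrative MIR for the dead-lanes combine above, assuming only the first
// destination has uses (register names are arbitrary):
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %x:_(s64)   ; %b is dead
// becomes
//   %a:_(s32) = G_TRUNC %x:_(s64)
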
bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);
  // G_ZEXT on vector applies to each lane, so it will
  // affect all destinations. Therefore we won't be able
  // to simplify the unmerge to just the first definition.
  if (Dst0Ty.isVector())
    return false;
  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.isVector())
    return false;

  Register ZExtSrcReg;
  if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    return false;

  // Finally we can replace the first definition with
  // a zext of the source if the definition is big enough to hold
  // all of ZExtSrc bits.
  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
  return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
}

void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  Register Dst0Reg = MI.getOperand(0).getReg();

  MachineInstr *ZExtInstr =
      MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
  assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
         "Expecting a G_ZEXT");

  Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);
  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);

  Builder.setInstrAndDebugLoc(MI);

  if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
    Builder.buildZExt(Dst0Reg, ZExtSrcReg);
  } else {
    assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
           "ZExt src doesn't fit in destination");
    replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
  }

  Register ZeroReg;
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!ZeroReg)
      ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
    replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
  }
  MI.eraseFromParent();
}

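// Illustrative MIR for the unmerge-of-zext combine above (register names are
// arbitrary): the zext guarantees the high bits are zero, so the first
// definition re-extends the original source and the rest become constants.
//   %ext:_(s64) = G_ZEXT %x:_(s16)
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %ext:_(s64)
// becomes
//   %lo:_(s32) = G_ZEXT %x:_(s16)
//   %hi:_(s32) = G_CONSTANT i32 0
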
bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
                                                unsigned TargetShiftSize,
                                                unsigned &ShiftVal) {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  if (Ty.isVector()) // TODO:
    return false;

  // Don't narrow further than the requested size.
  unsigned Size = Ty.getSizeInBits();
  if (Size <= TargetShiftSize)
    return false;

  auto MaybeImmVal =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeImmVal)
    return false;

  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;
}

void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
                                                const unsigned &ShiftVal) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(SrcReg);
  unsigned Size = Ty.getSizeInBits();
  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);

  LLT HalfTy = LLT::scalar(HalfSize);

  Builder.setInstr(MI);
  auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
  unsigned NarrowShiftAmt = ShiftVal - HalfSize;

  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);

    //  dst = G_LSHR s64:x, C for C >= 32
    // =>
    //   lo, hi = G_UNMERGE_VALUES x
    //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0

    if (NarrowShiftAmt != 0) {
      Narrowed = Builder.buildLShr(HalfTy, Narrowed,
        Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
    }

    auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);
    //  dst = G_SHL s64:x, C for C >= 32
    // =>
    //   lo, hi = G_UNMERGE_VALUES x
    //   dst = G_MERGE_VALUES 0, (G_SHL lo, C - 32)
    if (NarrowShiftAmt != 0) {
      Narrowed = Builder.buildShl(HalfTy, Narrowed,
        Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
    }

    auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});
  } else {
    assert(MI.getOpcode() == TargetOpcode::G_ASHR);
    auto Hi = Builder.buildAShr(
      HalfTy, Unmerge.getReg(1),
      Builder.buildConstant(HalfTy, HalfSize - 1));

    if (ShiftVal == HalfSize) {
      // (G_ASHR i64:x, 32) ->
      //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
      Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
    } else if (ShiftVal == Size - 1) {
      // Don't need a second shift.
      // (G_ASHR i64:x, 63) ->
      //   %narrowed = (G_ASHR hi_32(x), 31)
      //   G_MERGE_VALUES %narrowed, %narrowed
      Builder.buildMergeLikeInstr(DstReg, {Hi, Hi});
    } else {
      auto Lo = Builder.buildAShr(
        HalfTy, Unmerge.getReg(1),
        Builder.buildConstant(HalfTy, ShiftVal - HalfSize));

      // (G_ASHR i64:x, C) ->, for C >= 32
      //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
      Builder.buildMergeLikeInstr(DstReg, {Lo, Hi});
    }
  }

  MI.eraseFromParent();
}

bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
                                              unsigned TargetShiftAmount) {
  unsigned ShiftAmt;
  if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
    applyCombineShiftToUnmerge(MI, ShiftAmt);
    return true;
  }

  return false;
}

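// Illustrative MIR for the shift-to-unmerge path above, with
// TargetShiftAmount = 32 (register names are arbitrary):
//   %d:_(s64) = G_LSHR %x:_(s64), 40
// becomes
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x:_(s64)
//   %n:_(s32) = G_LSHR %hi:_(s32), 8        ; 40 - 32 == 8
//   %zero:_(s32) = G_CONSTANT i32 0
//   %d:_(s64) = G_MERGE_VALUES %n:_(s32), %zero:_(s32)
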
bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  Register SrcReg = MI.getOperand(1).getReg();
  return mi_match(SrcReg, MRI,
                  m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
}

void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInstr(MI);
  Builder.buildCopy(DstReg, Reg);
  MI.eraseFromParent();
}

void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInstr(MI);
  Builder.buildZExtOrTrunc(DstReg, Reg);
  MI.eraseFromParent();
}

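// Illustrative MIR for the two round-trip folds above (register names are
// arbitrary). G_INTTOPTR of a matching-width G_PTRTOINT is just a copy:
//   %i:_(s64) = G_PTRTOINT %p:_(p0)
//   %q:_(p0) = G_INTTOPTR %i:_(s64)       ; folds to COPY %p
// and G_PTRTOINT of a G_INTTOPTR reuses the integer, zero-extending or
// truncating if the widths differ:
//   %r:_(p0) = G_INTTOPTR %j:_(s64)
//   %k:_(s32) = G_PTRTOINT %r:_(p0)       ; folds to G_TRUNC %j
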
bool CombinerHelper::matchCombineAddP2IToPtrAdd(
    MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
  assert(MI.getOpcode() == TargetOpcode::G_ADD);
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  LLT IntTy = MRI.getType(LHS);

  // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
  // instruction.
  PtrReg.second = false;
  for (Register SrcReg : {LHS, RHS}) {
    if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
      // Don't handle cases where the integer is implicitly converted to the
      // pointer width.
      LLT PtrTy = MRI.getType(PtrReg.first);
      if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
        return true;
    }

    PtrReg.second = true;
  }

  return false;
}

void CombinerHelper::applyCombineAddP2IToPtrAdd(
    MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
  Register Dst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  const bool DoCommute = PtrReg.second;
  if (DoCommute)
    std::swap(LHS, RHS);
  LHS = PtrReg.first;

  LLT PtrTy = MRI.getType(LHS);

  Builder.setInstrAndDebugLoc(MI);
  auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
  Builder.buildPtrToInt(Dst, PtrAdd);
  MI.eraseFromParent();
}

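// Illustrative MIR for the combine above (register names are arbitrary):
//   %i:_(s64) = G_PTRTOINT %p:_(p0)
//   %sum:_(s64) = G_ADD %i:_(s64), %off:_(s64)
// becomes
//   %pa:_(p0) = G_PTR_ADD %p:_(p0), %off:_(s64)
//   %sum:_(s64) = G_PTRTOINT %pa:_(p0)
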
bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
                                                  APInt &NewCst) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register LHS = PtrAdd.getBaseReg();
  Register RHS = PtrAdd.getOffsetReg();
  MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();

  if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
    APInt Cst;
    if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
      auto DstTy = MRI.getType(PtrAdd.getReg(0));
      // G_INTTOPTR uses zero-extension
      NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
      NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
      return true;
    }
  }

  return false;
}

void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
                                                  APInt &NewCst) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register Dst = PtrAdd.getReg(0);

  Builder.setInstrAndDebugLoc(MI);
  Builder.buildConstant(Dst, NewCst);
  PtrAdd.eraseFromParent();
}

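// Illustrative arithmetic for the constant fold above, assuming a 64-bit p0:
// a base of G_INTTOPTR 16 with a G_PTR_ADD offset of -8 folds to the single
// constant 8. The base is zero-extended (G_INTTOPTR semantics) while the
// offset is sign-extended, matching the zextOrTrunc/sextOrTrunc calls in the
// matcher.
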
bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  return mi_match(SrcReg, MRI,
                  m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
}

bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  if (mi_match(SrcReg, MRI,
               m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
    return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
  }
  return false;
}

bool CombinerHelper::matchCombineExtOfExt(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");
  Register SrcReg = MI.getOperand(1).getReg();
  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
  // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
  unsigned Opc = MI.getOpcode();
  unsigned SrcOpc = SrcMI->getOpcode();
  if (Opc == SrcOpc ||
      (Opc == TargetOpcode::G_ANYEXT &&
       (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
      (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
    MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
    return true;
  }
  return false;
}

void CombinerHelper::applyCombineExtOfExt(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");

  Register Reg = std::get<0>(MatchInfo);
  unsigned SrcExtOp = std::get<1>(MatchInfo);

  // Combine exts with the same opcode.
  if (MI.getOpcode() == SrcExtOp) {
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(Reg);
    Observer.changedInstr(MI);
    return;
  }

  // Combine:
  // - anyext([sz]ext x) to [sz]ext x
  // - sext(zext x) to zext x
  if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
      (MI.getOpcode() == TargetOpcode::G_SEXT &&
       SrcExtOp == TargetOpcode::G_ZEXT)) {
    Register DstReg = MI.getOperand(0).getReg();
    Builder.setInstrAndDebugLoc(MI);
    Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
    MI.eraseFromParent();
  }
}

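// Illustrative MIR for the ext-of-ext fold above (register names are
// arbitrary): sext of a zext can use the inner zext directly because the
// zext already guarantees a zero sign bit.
//   %z:_(s32) = G_ZEXT %x:_(s8)
//   %a:_(s64) = G_SEXT %z:_(s32)
// becomes
//   %a:_(s64) = G_ZEXT %x:_(s8)
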
void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);

  Builder.setInstrAndDebugLoc(MI);
  Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
                   MI.getFlags());
  MI.eraseFromParent();
}

bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
                                            BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
  Register Src = MI.getOperand(1).getReg();
  Register NegSrc;

  if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc))))
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(NegSrc);
    Observer.changedInstr(MI);
  };
  return true;
}

bool CombinerHelper::matchCombineTruncOfExt(
    MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  Register SrcReg = MI.getOperand(1).getReg();
  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
  unsigned SrcOpc = SrcMI->getOpcode();
  if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
      SrcOpc == TargetOpcode::G_ZEXT) {
    MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
    return true;
  }
  return false;
}

void CombinerHelper::applyCombineTruncOfExt(
    MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  Register SrcReg = MatchInfo.first;
  unsigned SrcExtOp = MatchInfo.second;
  Register DstReg = MI.getOperand(0).getReg();
  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);
  if (SrcTy == DstTy) {
    MI.eraseFromParent();
    replaceRegWith(MRI, DstReg, SrcReg);
    return;
  }
  Builder.setInstrAndDebugLoc(MI);
  if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
    Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
  else
    Builder.buildTrunc(DstReg, SrcReg);
  MI.eraseFromParent();
}

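// Illustrative MIR for the three shapes handled above (register names are
// arbitrary):
//   %t:_(s32) = G_TRUNC (G_ZEXT %x:_(s32))   -> %x           ; same type
//   %t:_(s16) = G_TRUNC (G_SEXT %x:_(s8))    -> G_SEXT %x    ; still wider
//   %t:_(s16) = G_TRUNC (G_ANYEXT %x:_(s32)) -> G_TRUNC %x   ; narrower
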
static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
  const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
  const unsigned TruncSize = TruncTy.getScalarSizeInBits();

  // ShiftTy > 32 > TruncTy -> 32
  if (ShiftSize > 32 && TruncSize < 32)
    return ShiftTy.changeElementSize(32);

  // TODO: We could also reduce to 16 bits, but that's more target-dependent.
  // Some targets like it, some don't, some only like it under certain
  // conditions/processor versions, etc.
  // A TL hook might be needed for this.

  // Don't combine
  return ShiftTy;
}

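// For example, a trunc of an s64 right shift down to s16 picks s32 as the
// intermediate type (s64 > 32 > s16), while a trunc to s32 itself returns
// ShiftTy unchanged and the caller gives up on the combine.
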
bool CombinerHelper::matchCombineTruncOfShift(
    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  if (!MRI.hasOneNonDBGUse(SrcReg))
    return false;

  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);

  MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI);
  const auto &TL = getTargetLowering();

  LLT NewShiftTy;
  switch (SrcMI->getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_SHL: {
    NewShiftTy = DstTy;

    // Make sure new shift amount is legal.
    KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
    if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
      return false;
    break;
  }
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    // For right shifts, we conservatively do not do the transform if the TRUNC
    // has any STORE users. The reason is that if we change the type of the
    // shift, we may break the truncstore combine.
    //
    // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
    for (auto &User : MRI.use_instructions(DstReg))
      if (User.getOpcode() == TargetOpcode::G_STORE)
        return false;

    NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy);
    if (NewShiftTy == SrcTy)
      return false;

    // Make sure we won't lose information by truncating the high bits.
    KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
    if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
                                DstTy.getScalarSizeInBits()))
      return false;
    break;
  }
  }

  if (!isLegalOrBeforeLegalizer(
          {SrcMI->getOpcode(),
           {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
    return false;

  MatchInfo = std::make_pair(SrcMI, NewShiftTy);
  return true;
}

void CombinerHelper::applyCombineTruncOfShift(
    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);

  MachineInstr *ShiftMI = MatchInfo.first;
  LLT NewShiftTy = MatchInfo.second;

  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);

  Register ShiftAmt = ShiftMI->getOperand(2).getReg();
  Register ShiftSrc = ShiftMI->getOperand(1).getReg();
  ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);

  Register NewShift =
      Builder
          .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})
          .getReg(0);

  if (NewShiftTy == DstTy)
    replaceRegWith(MRI, Dst, NewShift);
  else
    Builder.buildTrunc(Dst, NewShift);

  eraseInst(MI);
}

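// Illustrative MIR for the rewrite above, assuming the shift amount is known
// small enough per the matcher (register names are arbitrary):
//   %s:_(s64) = G_LSHR %x:_(s64), %amt    ; known %amt <= 16
//   %t:_(s16) = G_TRUNC %s:_(s64)
// becomes
//   %xt:_(s32) = G_TRUNC %x:_(s64)
//   %ns:_(s32) = G_LSHR %xt:_(s32), %amt
//   %t:_(s16) = G_TRUNC %ns:_(s32)
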
bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
  return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
    return MO.isReg() &&
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
  });
}

bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
  return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
    return !MO.isReg() ||
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
  });
}

bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  return all_of(Mask, [](int Elt) { return Elt < 0; });
}

bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
                      MRI);
}

bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
                      MRI);
}

bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) {
  assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
          MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
         "Expected an insert/extract element op");
  LLT VecTy = MRI.getType(MI.getOperand(1).getReg());
  unsigned IdxIdx =
      MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
  auto Idx = getIConstantVRegVal(MI.getOperand(IdxIdx).getReg(), MRI);
  if (!Idx)
    return false;
  return Idx->getZExtValue() >= VecTy.getNumElements();
}

bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
  GSelect &SelMI = cast<GSelect>(MI);
  auto Cst =
      isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
  if (!Cst)
    return false;
  OpIdx = Cst->isZero() ? 3 : 2;
  return true;
}

bool CombinerHelper::eraseInst(MachineInstr &MI) {
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
                                    const MachineOperand &MOP2) {
  if (!MOP1.isReg() || !MOP2.isReg())
    return false;
  auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
  if (!InstAndDef1)
    return false;
  auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
  if (!InstAndDef2)
    return false;
  MachineInstr *I1 = InstAndDef1->MI;
  MachineInstr *I2 = InstAndDef2->MI;

  // Handle a case like this:
  //
  // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
  //
  // Even though %0 and %1 are produced by the same instruction, they are not
  // the same values.
  if (I1 == I2)
    return MOP1.getReg() == MOP2.getReg();

  // If we have an instruction which loads or stores, we can't guarantee that
  // it is identical.
  //
  // For example, we may have
  //
  // %x1 = G_LOAD %addr (load N from @somewhere)
  // ...
  // call @foo
  // ...
  // %x2 = G_LOAD %addr (load N from @somewhere)
  // ...
  // %or = G_OR %x1, %x2
  //
  // It's possible that @foo will modify whatever lives at the address we're
  // loading from. To be safe, let's just assume that all loads and stores
  // are different (unless we have something which is guaranteed to not
  // change.)
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
    return false;

  // If both instructions are loads or stores, they are equal only if both
  // are dereferenceable invariant loads with the same number of bits.
  if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
    GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
    GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
    if (!LS1 || !LS2)
      return false;

    if (!I2->isDereferenceableInvariantLoad() ||
        (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
      return false;
  }

  // Check for physical registers on the instructions first to avoid cases
  // like this:
  //
  // %a = COPY $physreg
  // ...
  // SOMETHING implicit-def $physreg
  // ...
  // %b = COPY $physreg
  //
  // These copies are not equivalent.
  if (any_of(I1->uses(), [](const MachineOperand &MO) {
        return MO.isReg() && MO.getReg().isPhysical();
      })) {
    // Check if we have a case like this:
    //
    // %a = COPY $physreg
    // %b = COPY %a
    //
    // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
    // From that, we know that they must have the same value, since they must
    // have come from the same COPY.
    return I1->isIdenticalTo(*I2);
  }

  // We don't have any physical registers, so we don't necessarily need the
  // same vreg defs.
  //
  // On the off-chance that there's some target instruction feeding into the
  // instruction, let's use produceSameValue instead of isIdenticalTo.
  if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
    // Handle instructions with multiple defs that produce the same values.
    // Values are the same for operands with the same index.
    // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // I1 and I2 are different instructions but produce the same values:
    // %1 and %6 are the same value, %1 and %7 are not.
    return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
           I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
  }
  return false;
}

bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
  if (!MOP.isReg())
    return false;
  auto *MI = MRI.getVRegDef(MOP.getReg());
  auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;
}

bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
                                                     unsigned OpIdx) {
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
  Register OldReg = MI.getOperand(0).getReg();
  Register Replacement = MI.getOperand(OpIdx).getReg();
  assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
  MI.eraseFromParent();
  replaceRegWith(MRI, OldReg, Replacement);
  return true;
}

bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
                                                 Register Replacement) {
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
  Register OldReg = MI.getOperand(0).getReg();
  assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
  MI.eraseFromParent();
  replaceRegWith(MRI, OldReg, Replacement);
  return true;
}

bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  // Match (cond ? x : x)
  return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
                       MRI);
}

bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
  return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                       MRI);
}

bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
  return matchConstantOp(MI.getOperand(OpIdx), 0) &&
         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
                       MRI);
}

bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
  MachineOperand &MO = MI.getOperand(OpIdx);
  return MO.isReg() &&
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
}

bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
                                                        unsigned OpIdx) {
  MachineOperand &MO = MI.getOperand(OpIdx);
  return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
}

bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildFConstant(MI.getOperand(0), C);
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildConstant(MI.getOperand(0), C);
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildConstant(MI.getOperand(0), C);
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildUndef(MI.getOperand(0));
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::matchSimplifyAddToSub(
    MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);

  // Helper lambda to check for opportunities for
  // ((0-A) + B) -> B - A
  // (A + (0-B)) -> A - B
  auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
    if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
      return false;
    NewLHS = MaybeNewLHS;
    return true;
  };

  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
}

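// Illustrative MIR for the first fold above (register names are arbitrary):
//   %neg:_(s32) = G_SUB %zero:_(s32), %a:_(s32)
//   %r:_(s32) = G_ADD %neg:_(s32), %b:_(s32)
// becomes
//   %r:_(s32) = G_SUB %b:_(s32), %a:_(s32)
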
bool CombinerHelper::matchCombineInsertVecElts(
    MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
         "Invalid opcode");
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
  unsigned NumElts = DstTy.getNumElements();
  // If this MI is part of a sequence of insert_vec_elts, then
  // don't do the combine in the middle of the sequence.
  if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
                                   TargetOpcode::G_INSERT_VECTOR_ELT)
    return false;
  MachineInstr *CurrInst = &MI;
  MachineInstr *TmpInst;
  int64_t IntImm;
  Register TmpReg;
  MatchInfo.resize(NumElts);
  while (mi_match(
      CurrInst->getOperand(0).getReg(), MRI,
      m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
    if (IntImm >= NumElts || IntImm < 0)
      return false;
    if (!MatchInfo[IntImm])
      MatchInfo[IntImm] = TmpReg;
    CurrInst = TmpInst;
  }
  // Variable index.
  if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
    return false;
  if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
      if (!MatchInfo[I - 1].isValid())
        MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
    }
    return true;
  }
  // If we didn't end in a G_IMPLICIT_DEF, bail out.
  return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
}

void CombinerHelper::applyCombineInsertVecElts(
    MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
  Builder.setInstr(MI);
  Register UndefReg;
  auto GetUndef = [&]() {
    if (UndefReg)
      return UndefReg;
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
    return UndefReg;
  };
  for (unsigned I = 0; I < MatchInfo.size(); ++I) {
    if (!MatchInfo[I])
      MatchInfo[I] = GetUndef();
  }
  Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
  MI.eraseFromParent();
}

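// Illustrative MIR for the insert-chain combine above (register names are
// arbitrary; in real MIR the index operands are constant registers):
//   %v0:_(<2 x s32>) = G_IMPLICIT_DEF
//   %v1:_(<2 x s32>) = G_INSERT_VECTOR_ELT %v0, %a:_(s32), 0
//   %v2:_(<2 x s32>) = G_INSERT_VECTOR_ELT %v1, %b:_(s32), 1
// becomes
//   %v2:_(<2 x s32>) = G_BUILD_VECTOR %a:_(s32), %b:_(s32)
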
void CombinerHelper::applySimplifyAddToSub(
    MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
  Builder.setInstr(MI);
  Register SubLHS, SubRHS;
  std::tie(SubLHS, SubRHS) = MatchInfo;
  Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
  MI.eraseFromParent();
}

2726 | bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands( | |||
2727 | MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { | |||
2728 | // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ... | |||
2729 | // | |||
2730 | // Creates the new hand + logic instruction (but does not insert them.) | |||
2731 | // | |||
2732 | // On success, MatchInfo is populated with the new instructions. These are | |||
2733 | // inserted in applyHoistLogicOpWithSameOpcodeHands. | |||
2734 | unsigned LogicOpcode = MI.getOpcode(); | |||
2735 | assert(LogicOpcode == TargetOpcode::G_AND ||(static_cast <bool> (LogicOpcode == TargetOpcode::G_AND || LogicOpcode == TargetOpcode::G_OR || LogicOpcode == TargetOpcode ::G_XOR) ? void (0) : __assert_fail ("LogicOpcode == TargetOpcode::G_AND || LogicOpcode == TargetOpcode::G_OR || LogicOpcode == TargetOpcode::G_XOR" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 2737, __extension__ __PRETTY_FUNCTION__)) | |||
2736 | LogicOpcode == TargetOpcode::G_OR ||(static_cast <bool> (LogicOpcode == TargetOpcode::G_AND || LogicOpcode == TargetOpcode::G_OR || LogicOpcode == TargetOpcode ::G_XOR) ? void (0) : __assert_fail ("LogicOpcode == TargetOpcode::G_AND || LogicOpcode == TargetOpcode::G_OR || LogicOpcode == TargetOpcode::G_XOR" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 2737, __extension__ __PRETTY_FUNCTION__)) | |||
2737 | LogicOpcode == TargetOpcode::G_XOR)(static_cast <bool> (LogicOpcode == TargetOpcode::G_AND || LogicOpcode == TargetOpcode::G_OR || LogicOpcode == TargetOpcode ::G_XOR) ? void (0) : __assert_fail ("LogicOpcode == TargetOpcode::G_AND || LogicOpcode == TargetOpcode::G_OR || LogicOpcode == TargetOpcode::G_XOR" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 2737, __extension__ __PRETTY_FUNCTION__)); | |||
2738 | MachineIRBuilder MIB(MI); | |||
2739 | Register Dst = MI.getOperand(0).getReg(); | |||
2740 | Register LHSReg = MI.getOperand(1).getReg(); | |||
2741 | Register RHSReg = MI.getOperand(2).getReg(); | |||
2742 | ||||
2743 | // Don't recompute anything. | |||
2744 | if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg)) | |||
2745 | return false; | |||
2746 | ||||
2747 | // Make sure we have (hand x, ...), (hand y, ...) | |||
2748 | MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI); | |||
2749 | MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI); | |||
2750 | if (!LeftHandInst || !RightHandInst) | |||
2751 | return false; | |||
2752 | unsigned HandOpcode = LeftHandInst->getOpcode(); | |||
2753 | if (HandOpcode != RightHandInst->getOpcode()) | |||
2754 | return false; | |||
2755 | if (!LeftHandInst->getOperand(1).isReg() || | |||
2756 | !RightHandInst->getOperand(1).isReg()) | |||
2757 | return false; | |||
2758 | ||||
2759 | // Make sure the types match up, and that, if we're doing this | |||
2760 | // post-legalization, we end up with legal types. | |||
2761 | Register X = LeftHandInst->getOperand(1).getReg(); | |||
2762 | Register Y = RightHandInst->getOperand(1).getReg(); | |||
2763 | LLT XTy = MRI.getType(X); | |||
2764 | LLT YTy = MRI.getType(Y); | |||
2765 | if (!XTy.isValid() || XTy != YTy) | |||
2766 | return false; | |||
2767 | ||||
2768 | // Optional extra source register. | |||
2769 | Register ExtraHandOpSrcReg; | |||
2770 | switch (HandOpcode) { | |||
2771 | default: | |||
2772 | return false; | |||
2773 | case TargetOpcode::G_ANYEXT: | |||
2774 | case TargetOpcode::G_SEXT: | |||
2775 | case TargetOpcode::G_ZEXT: { | |||
2776 | // Match: logic (ext X), (ext Y) --> ext (logic X, Y) | |||
2777 | break; | |||
2778 | } | |||
2779 | case TargetOpcode::G_AND: | |||
2780 | case TargetOpcode::G_ASHR: | |||
2781 | case TargetOpcode::G_LSHR: | |||
2782 | case TargetOpcode::G_SHL: { | |||
2783 | // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z | |||
2784 | MachineOperand &ZOp = LeftHandInst->getOperand(2); | |||
2785 | if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2))) | |||
2786 | return false; | |||
2787 | ExtraHandOpSrcReg = ZOp.getReg(); | |||
2788 | break; | |||
2789 | } | |||
2790 | } | |||
2791 | ||||
2792 | if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}})) | |||
2793 | return false; | |||
2794 | ||||
2795 | // Record the steps to build the new instructions. | |||
2796 | // | |||
2797 | // Steps to build (logic x, y) | |||
2798 | auto NewLogicDst = MRI.createGenericVirtualRegister(XTy); | |||
2799 | OperandBuildSteps LogicBuildSteps = { | |||
2800 | [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); }, | |||
2801 | [=](MachineInstrBuilder &MIB) { MIB.addReg(X); }, | |||
2802 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }}; | |||
2803 | InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps); | |||
2804 | ||||
2805 | // Steps to build hand (logic x, y), ...z | |||
2806 | OperandBuildSteps HandBuildSteps = { | |||
2807 | [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); }, | |||
2808 | [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }}; | |||
2809 | if (ExtraHandOpSrcReg.isValid()) | |||
2810 | HandBuildSteps.push_back( | |||
2811 | [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); }); | |||
2812 | InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps); | |||
2813 | ||||
2814 | MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps}); | |||
2815 | return true; | |||
2816 | } | |||
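
// Illustrative example (added for exposition, not in the original source):
// one instance of the hoist above, where the "hands" are shifts sharing the
// operand z. Since (x >> z) & (y >> z) == (x & y) >> z, the logic op can be
// applied first:
//
//   %lx:_(s32) = G_LSHR %x, %z
//   %ly:_(s32) = G_LSHR %y, %z
//   %dst:_(s32) = G_AND %lx, %ly
// ==>
//   %logic:_(s32) = G_AND %x, %y
//   %dst:_(s32) = G_LSHR %logic, %z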
2817 | ||||
2818 | void CombinerHelper::applyBuildInstructionSteps( | |||
2819 | MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { | |||
2820 | assert(MatchInfo.InstrsToBuild.size() && | |||
2821 |        "Expected at least one instr to build?"); | |||
2822 | Builder.setInstr(MI); | |||
2823 | for (auto &InstrToBuild : MatchInfo.InstrsToBuild) { | |||
2824 | assert(InstrToBuild.Opcode && "Expected a valid opcode?"); | |||
2825 | assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?"); | |||
2826 | MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode); | |||
2827 | for (auto &OperandFn : InstrToBuild.OperandFns) | |||
2828 | OperandFn(Instr); | |||
2829 | } | |||
2830 | MI.eraseFromParent(); | |||
2831 | } | |||
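
// A minimal sketch (illustrative only; this helper is hypothetical and not
// part of the original file) of how a matcher can populate
// InstructionStepsMatchInfo so that applyBuildInstructionSteps emits
// "%dst = G_XOR %a, %b":
static void buildExampleXorSteps(Register Dst, Register A, Register B,
                                 InstructionStepsMatchInfo &MatchInfo) {
  // One callback per operand, in operand order: the def first, then the uses.
  OperandBuildSteps Steps = {
      [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(A); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(B); }};
  MatchInfo = InstructionStepsMatchInfo(
      {InstructionBuildSteps(TargetOpcode::G_XOR, Steps)});
}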
2832 | ||||
2833 | bool CombinerHelper::matchAshrShlToSextInreg( | |||
2834 | MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) { | |||
2835 | assert(MI.getOpcode() == TargetOpcode::G_ASHR); | |||
2836 | int64_t ShlCst, AshrCst; | |||
2837 | Register Src; | |||
2838 | if (!mi_match(MI.getOperand(0).getReg(), MRI, | |||
2839 | m_GAShr(m_GShl(m_Reg(Src), m_ICstOrSplat(ShlCst)), | |||
2840 | m_ICstOrSplat(AshrCst)))) | |||
2841 | return false; | |||
2842 | if (ShlCst != AshrCst) | |||
2843 | return false; | |||
2844 | if (!isLegalOrBeforeLegalizer( | |||
2845 | {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}})) | |||
2846 | return false; | |||
2847 | MatchInfo = std::make_tuple(Src, ShlCst); | |||
2848 | return true; | |||
2849 | } | |||
2850 | ||||
2851 | void CombinerHelper::applyAshShlToSextInreg( | |||
2852 | MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) { | |||
2853 | assert(MI.getOpcode() == TargetOpcode::G_ASHR); | |||
2854 | Register Src; | |||
2855 | int64_t ShiftAmt; | |||
2856 | std::tie(Src, ShiftAmt) = MatchInfo; | |||
2857 | unsigned Size = MRI.getType(Src).getScalarSizeInBits(); | |||
2858 | Builder.setInstrAndDebugLoc(MI); | |||
2859 | Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt); | |||
2860 | MI.eraseFromParent(); | |||
2861 | } | |||
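
// Worked example (added for exposition): on s32, matching shift amounts of 24
// keep only the low 8 bits and then sign-extend them, so the replacement
// extends from bit Size - ShiftAmt == 32 - 24 == 8:
//
//   %c24:_(s32) = G_CONSTANT i32 24
//   %shl:_(s32) = G_SHL %x, %c24
//   %dst:_(s32) = G_ASHR %shl, %c24
// ==>
//   %dst:_(s32) = G_SEXT_INREG %x, 8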
2862 | ||||
2863 | /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0 | |||
2864 | bool CombinerHelper::matchOverlappingAnd( | |||
2865 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
2866 | assert(MI.getOpcode() == TargetOpcode::G_AND); | |||
2867 | ||||
2868 | Register Dst = MI.getOperand(0).getReg(); | |||
2869 | LLT Ty = MRI.getType(Dst); | |||
2870 | ||||
2871 | Register R; | |||
2872 | int64_t C1; | |||
2873 | int64_t C2; | |||
2874 | if (!mi_match( | |||
2875 | Dst, MRI, | |||
2876 | m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2)))) | |||
2877 | return false; | |||
2878 | ||||
2879 | MatchInfo = [=](MachineIRBuilder &B) { | |||
2880 | if (C1 & C2) { | |||
2881 | B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2)); | |||
2882 | return; | |||
2883 | } | |||
2884 | auto Zero = B.buildConstant(Ty, 0); | |||
2885 | replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg()); | |||
2886 | }; | |||
2887 | return true; | |||
2888 | } | |||
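
// Worked example (added for exposition): with C1 = 0x0ff0 and C2 = 0x00ff the
// masks overlap, so the nested G_AND collapses to their intersection
// C1 & C2 = 0x00f0; with disjoint masks (say 0x0f00 and 0x00ff) the result is
// the constant 0 instead:
//
//   %c1:_(s32) = G_CONSTANT i32 0x0ff0
//   %c2:_(s32) = G_CONSTANT i32 0x00ff
//   %a:_(s32) = G_AND %x, %c1
//   %dst:_(s32) = G_AND %a, %c2
// ==>
//   %m:_(s32) = G_CONSTANT i32 0x00f0
//   %dst:_(s32) = G_AND %x, %m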
2889 | ||||
2890 | bool CombinerHelper::matchRedundantAnd(MachineInstr &MI, | |||
2891 | Register &Replacement) { | |||
2892 | // Given | |||
2893 | // | |||
2894 | // %y:_(sN) = G_SOMETHING | |||
2895 | // %x:_(sN) = G_SOMETHING | |||
2896 | // %res:_(sN) = G_AND %x, %y | |||
2897 | // | |||
2898 | // Eliminate the G_AND when it is known that x & y == x or x & y == y. | |||
2899 | // | |||
2900 | // Patterns like this can appear as a result of legalization. E.g. | |||
2901 | // | |||
2902 | // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y | |||
2903 | // %one:_(s32) = G_CONSTANT i32 1 | |||
2904 | // %and:_(s32) = G_AND %cmp, %one | |||
2905 | // | |||
2906 | // In this case, G_ICMP only produces a single bit, so x & 1 == x. | |||
2907 | assert(MI.getOpcode() == TargetOpcode::G_AND); | |||
2908 | if (!KB) | |||
2909 | return false; | |||
2910 | ||||
2911 | Register AndDst = MI.getOperand(0).getReg(); | |||
2912 | Register LHS = MI.getOperand(1).getReg(); | |||
2913 | Register RHS = MI.getOperand(2).getReg(); | |||
2914 | KnownBits LHSBits = KB->getKnownBits(LHS); | |||
2915 | KnownBits RHSBits = KB->getKnownBits(RHS); | |||
2916 | ||||
2917 | // Check that x & Mask == x. | |||
2918 | // x & 1 == x, always | |||
2919 | // x & 0 == x, only if x is also 0 | |||
2920 | // Meaning Mask has no effect if every bit is either one in Mask or zero in x. | |||
2921 | // | |||
2922 | // Check if we can replace AndDst with the LHS of the G_AND | |||
2923 | if (canReplaceReg(AndDst, LHS, MRI) && | |||
2924 | (LHSBits.Zero | RHSBits.One).isAllOnes()) { | |||
2925 | Replacement = LHS; | |||
2926 | return true; | |||
2927 | } | |||
2928 | ||||
2929 | // Check if we can replace AndDst with the RHS of the G_AND | |||
2930 | if (canReplaceReg(AndDst, RHS, MRI) && | |||
2931 | (LHSBits.One | RHSBits.Zero).isAllOnes()) { | |||
2932 | Replacement = RHS; | |||
2933 | return true; | |||
2934 | } | |||
2935 | ||||
2936 | return false; | |||
2937 | } | |||
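
// Worked example (added for exposition): on a target where G_ICMP produces 0
// or 1, known bits prove the high 31 bits of %cmp are zero. For the mask 1,
// every bit is then either known zero in the LHS or known one in the RHS, so
// (LHSBits.Zero | RHSBits.One).isAllOnes() holds and the G_AND is dropped:
//
//   %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
//   %one:_(s32) = G_CONSTANT i32 1
//   %and:_(s32) = G_AND %cmp, %one
// ==>
//   all uses of %and are replaced with %cmp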
2938 | ||||
2939 | bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) { | |||
2940 | // Given | |||
2941 | // | |||
2942 | // %y:_(sN) = G_SOMETHING | |||
2943 | // %x:_(sN) = G_SOMETHING | |||
2944 | // %res:_(sN) = G_OR %x, %y | |||
2945 | // | |||
2946 | // Eliminate the G_OR when it is known that x | y == x or x | y == y. | |||
2947 | assert(MI.getOpcode() == TargetOpcode::G_OR); | |||
2948 | if (!KB) | |||
2949 | return false; | |||
2950 | ||||
2951 | Register OrDst = MI.getOperand(0).getReg(); | |||
2952 | Register LHS = MI.getOperand(1).getReg(); | |||
2953 | Register RHS = MI.getOperand(2).getReg(); | |||
2954 | KnownBits LHSBits = KB->getKnownBits(LHS); | |||
2955 | KnownBits RHSBits = KB->getKnownBits(RHS); | |||
2956 | ||||
2957 | // Check that x | Mask == x. | |||
2958 | // x | 0 == x, always | |||
2959 | // x | 1 == x, only if x is also 1 | |||
2960 | // Meaning Mask has no effect if every bit is either zero in Mask or one in x. | |||
2961 | // | |||
2962 | // Check if we can replace OrDst with the LHS of the G_OR | |||
2963 | if (canReplaceReg(OrDst, LHS, MRI) && | |||
2964 | (LHSBits.One | RHSBits.Zero).isAllOnes()) { | |||
2965 | Replacement = LHS; | |||
2966 | return true; | |||
2967 | } | |||
2968 | ||||
2969 | // Check if we can replace OrDst with the RHS of the G_OR | |||
2970 | if (canReplaceReg(OrDst, RHS, MRI) && | |||
2971 | (LHSBits.Zero | RHSBits.One).isAllOnes()) { | |||
2972 | Replacement = RHS; | |||
2973 | return true; | |||
2974 | } | |||
2975 | ||||
2976 | return false; | |||
2977 | } | |||
2978 | ||||
2979 | bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) { | |||
2980 | // If the input is already sign extended, just drop the extension. | |||
2981 | Register Src = MI.getOperand(1).getReg(); | |||
2982 | unsigned ExtBits = MI.getOperand(2).getImm(); | |||
2983 | unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits(); | |||
2984 | return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1); | |||
2985 | } | |||
2986 | ||||
2987 | static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits, | |||
2988 | int64_t Cst, bool IsVector, bool IsFP) { | |||
2989 | // For i1, Cst will always be -1 regardless of boolean contents. | |||
2990 | return (ScalarSizeBits == 1 && Cst == -1) || | |||
2991 | isConstTrueVal(TLI, Cst, IsVector, IsFP); | |||
2992 | } | |||
2993 | ||||
2994 | bool CombinerHelper::matchNotCmp(MachineInstr &MI, | |||
2995 | SmallVectorImpl<Register> &RegsToNegate) { | |||
2996 | assert(MI.getOpcode() == TargetOpcode::G_XOR); | |||
2997 | LLT Ty = MRI.getType(MI.getOperand(0).getReg()); | |||
2998 | const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering(); | |||
2999 | Register XorSrc; | |||
3000 | Register CstReg; | |||
3001 | // We match xor(src, true) here. | |||
3002 | if (!mi_match(MI.getOperand(0).getReg(), MRI, | |||
3003 | m_GXor(m_Reg(XorSrc), m_Reg(CstReg)))) | |||
3004 | return false; | |||
3005 | ||||
3006 | if (!MRI.hasOneNonDBGUse(XorSrc)) | |||
3007 | return false; | |||
3008 | ||||
3009 | // Check that XorSrc is the root of a tree of comparisons combined with ANDs | |||
3010 | // and ORs. The suffix of RegsToNegate starting from index I is used as a work | |||
3011 | // list of tree nodes to visit. | |||
3012 | RegsToNegate.push_back(XorSrc); | |||
3013 | // Remember whether the comparisons are all integer or all floating point. | |||
3014 | bool IsInt = false; | |||
3015 | bool IsFP = false; | |||
3016 | for (unsigned I = 0; I < RegsToNegate.size(); ++I) { | |||
3017 | Register Reg = RegsToNegate[I]; | |||
3018 | if (!MRI.hasOneNonDBGUse(Reg)) | |||
3019 | return false; | |||
3020 | MachineInstr *Def = MRI.getVRegDef(Reg); | |||
3021 | switch (Def->getOpcode()) { | |||
3022 | default: | |||
3023 | // Don't match if the tree contains anything other than ANDs, ORs and | |||
3024 | // comparisons. | |||
3025 | return false; | |||
3026 | case TargetOpcode::G_ICMP: | |||
3027 | if (IsFP) | |||
3028 | return false; | |||
3029 | IsInt = true; | |||
3030 | // When we apply the combine we will invert the predicate. | |||
3031 | break; | |||
3032 | case TargetOpcode::G_FCMP: | |||
3033 | if (IsInt) | |||
3034 | return false; | |||
3035 | IsFP = true; | |||
3036 | // When we apply the combine we will invert the predicate. | |||
3037 | break; | |||
3038 | case TargetOpcode::G_AND: | |||
3039 | case TargetOpcode::G_OR: | |||
3040 | // Implement De Morgan's laws: | |||
3041 | // ~(x & y) -> ~x | ~y | |||
3042 | // ~(x | y) -> ~x & ~y | |||
3043 | // When we apply the combine we will change the opcode and recursively | |||
3044 | // negate the operands. | |||
3045 | RegsToNegate.push_back(Def->getOperand(1).getReg()); | |||
3046 | RegsToNegate.push_back(Def->getOperand(2).getReg()); | |||
3047 | break; | |||
3048 | } | |||
3049 | } | |||
3050 | ||||
3051 | // Now we know whether the comparisons are integer or floating point, check | |||
3052 | // the constant in the xor. | |||
3053 | int64_t Cst; | |||
3054 | if (Ty.isVector()) { | |||
3055 | MachineInstr *CstDef = MRI.getVRegDef(CstReg); | |||
3056 | auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI); | |||
3057 | if (!MaybeCst) | |||
3058 | return false; | |||
3059 | if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP)) | |||
3060 | return false; | |||
3061 | } else { | |||
3062 | if (!mi_match(CstReg, MRI, m_ICst(Cst))) | |||
3063 | return false; | |||
3064 | if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP)) | |||
3065 | return false; | |||
3066 | } | |||
3067 | ||||
3068 | return true; | |||
3069 | } | |||
3070 | ||||
3071 | void CombinerHelper::applyNotCmp(MachineInstr &MI, | |||
3072 | SmallVectorImpl<Register> &RegsToNegate) { | |||
3073 | for (Register Reg : RegsToNegate) { | |||
3074 | MachineInstr *Def = MRI.getVRegDef(Reg); | |||
3075 | Observer.changingInstr(*Def); | |||
3076 | // For each comparison, invert the opcode. For each AND and OR, change the | |||
3077 | // opcode. | |||
3078 | switch (Def->getOpcode()) { | |||
3079 | default: | |||
3080 | llvm_unreachable("Unexpected opcode"); | |||
3081 | case TargetOpcode::G_ICMP: | |||
3082 | case TargetOpcode::G_FCMP: { | |||
3083 | MachineOperand &PredOp = Def->getOperand(1); | |||
3084 | CmpInst::Predicate NewP = CmpInst::getInversePredicate( | |||
3085 | (CmpInst::Predicate)PredOp.getPredicate()); | |||
3086 | PredOp.setPredicate(NewP); | |||
3087 | break; | |||
3088 | } | |||
3089 | case TargetOpcode::G_AND: | |||
3090 | Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR)); | |||
3091 | break; | |||
3092 | case TargetOpcode::G_OR: | |||
3093 | Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND)); | |||
3094 | break; | |||
3095 | } | |||
3096 | Observer.changedInstr(*Def); | |||
3097 | } | |||
3098 | ||||
3099 | replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg()); | |||
3100 | MI.eraseFromParent(); | |||
3101 | } | |||
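
// Illustrative example (added for exposition): the rewrite above applied to a
// small tree. The xor-with-true (a logical NOT) is pushed through the G_AND
// by De Morgan's laws, inverting each comparison's predicate:
//
//   %a:_(s1) = G_ICMP intpred(eq), %x(s32), %y
//   %b:_(s1) = G_ICMP intpred(slt), %p(s32), %q
//   %c:_(s1) = G_AND %a, %b
//   %t:_(s1) = G_CONSTANT i1 true
//   %dst:_(s1) = G_XOR %c, %t
// ==>
//   %a:_(s1) = G_ICMP intpred(ne), %x(s32), %y
//   %b:_(s1) = G_ICMP intpred(sge), %p(s32), %q
//   %c:_(s1) = G_OR %a, %b
//   (and %dst is replaced by %c)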
3102 | ||||
3103 | bool CombinerHelper::matchXorOfAndWithSameReg( | |||
3104 | MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { | |||
3105 | // Match (xor (and x, y), y) (or any of its commuted cases) | |||
3106 | assert(MI.getOpcode() == TargetOpcode::G_XOR); | |||
3107 | Register &X = MatchInfo.first; | |||
3108 | Register &Y = MatchInfo.second; | |||
3109 | Register AndReg = MI.getOperand(1).getReg(); | |||
3110 | Register SharedReg = MI.getOperand(2).getReg(); | |||
3111 | ||||
3112 | // Find a G_AND on either side of the G_XOR. | |||
3113 | // Look for one of | |||
3114 | // | |||
3115 | // (xor (and x, y), SharedReg) | |||
3116 | // (xor SharedReg, (and x, y)) | |||
3117 | if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) { | |||
3118 | std::swap(AndReg, SharedReg); | |||
3119 | if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) | |||
3120 | return false; | |||
3121 | } | |||
3122 | ||||
3123 | // Only do this if we'll eliminate the G_AND. | |||
3124 | if (!MRI.hasOneNonDBGUse(AndReg)) | |||
3125 | return false; | |||
3126 | ||||
3127 | // We can combine if SharedReg is the same as either the LHS or RHS of the | |||
3128 | // G_AND. | |||
3129 | if (Y != SharedReg) | |||
3130 | std::swap(X, Y); | |||
3131 | return Y == SharedReg; | |||
3132 | } | |||
3133 | ||||
3134 | void CombinerHelper::applyXorOfAndWithSameReg( | |||
3135 | MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { | |||
3136 | // Fold (xor (and x, y), y) -> (and (not x), y) | |||
3137 | Builder.setInstrAndDebugLoc(MI); | |||
3138 | Register X, Y; | |||
3139 | std::tie(X, Y) = MatchInfo; | |||
3140 | auto Not = Builder.buildNot(MRI.getType(X), X); | |||
3141 | Observer.changingInstr(MI); | |||
3142 | MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND)); | |||
3143 | MI.getOperand(1).setReg(Not->getOperand(0).getReg()); | |||
3144 | MI.getOperand(2).setReg(Y); | |||
3145 | Observer.changedInstr(MI); | |||
3146 | } | |||
3147 | ||||
3148 | bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) { | |||
3149 | auto &PtrAdd = cast<GPtrAdd>(MI); | |||
3150 | Register DstReg = PtrAdd.getReg(0); | |||
3151 | LLT Ty = MRI.getType(DstReg); | |||
3152 | const DataLayout &DL = Builder.getMF().getDataLayout(); | |||
3153 | ||||
3154 | if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace())) | |||
3155 | return false; | |||
3156 | ||||
3157 | if (Ty.isPointer()) { | |||
3158 | auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI); | |||
3159 | return ConstVal && *ConstVal == 0; | |||
3160 | } | |||
3161 | ||||
3162 | assert(Ty.isVector() && "Expecting a vector type"); | |||
3163 | const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg()); | |||
3164 | return isBuildVectorAllZeros(*VecMI, MRI); | |||
3165 | } | |||
3166 | ||||
3167 | void CombinerHelper::applyPtrAddZero(MachineInstr &MI) { | |||
3168 | auto &PtrAdd = cast<GPtrAdd>(MI); | |||
3169 | Builder.setInstrAndDebugLoc(PtrAdd); | |||
3170 | Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg()); | |||
3171 | PtrAdd.eraseFromParent(); | |||
3172 | } | |||
3173 | ||||
3174 | /// The second source operand is known to be a power of 2. | |||
3175 | void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) { | |||
3176 | Register DstReg = MI.getOperand(0).getReg(); | |||
3177 | Register Src0 = MI.getOperand(1).getReg(); | |||
3178 | Register Pow2Src1 = MI.getOperand(2).getReg(); | |||
3179 | LLT Ty = MRI.getType(DstReg); | |||
3180 | Builder.setInstrAndDebugLoc(MI); | |||
3181 | ||||
3182 | // Fold (urem x, pow2) -> (and x, pow2-1) | |||
3183 | auto NegOne = Builder.buildConstant(Ty, -1); | |||
3184 | auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne); | |||
3185 | Builder.buildAnd(DstReg, Src0, Add); | |||
3186 | MI.eraseFromParent(); | |||
3187 | } | |||
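
// Worked example (added for exposition): for a power-of-two divisor the
// remainder is just the low bits, e.g. x urem 16 == x & 15. The combine
// computes the mask as pow2 + (-1):
//
//   %dst:_(s32) = G_UREM %x, %pow2
// ==>
//   %m1:_(s32) = G_CONSTANT i32 -1
//   %mask:_(s32) = G_ADD %pow2, %m1
//   %dst:_(s32) = G_AND %x, %mask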
3188 | ||||
3189 | bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI, | |||
3190 | unsigned &SelectOpNo) { | |||
3191 | Register LHS = MI.getOperand(1).getReg(); | |||
3192 | Register RHS = MI.getOperand(2).getReg(); | |||
3193 | ||||
3194 | Register OtherOperandReg = RHS; | |||
3195 | SelectOpNo = 1; | |||
3196 | MachineInstr *Select = MRI.getVRegDef(LHS); | |||
3197 | ||||
3198 | // Don't do this unless the old select is going away. We want to eliminate the | |||
3199 | // binary operator, not replace a binop with a select. | |||
3200 | if (Select->getOpcode() != TargetOpcode::G_SELECT || | |||
3201 | !MRI.hasOneNonDBGUse(LHS)) { | |||
3202 | OtherOperandReg = LHS; | |||
3203 | SelectOpNo = 2; | |||
3204 | Select = MRI.getVRegDef(RHS); | |||
3205 | if (Select->getOpcode() != TargetOpcode::G_SELECT || | |||
3206 | !MRI.hasOneNonDBGUse(RHS)) | |||
3207 | return false; | |||
3208 | } | |||
3209 | ||||
3210 | MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg()); | |||
3211 | MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg()); | |||
3212 | ||||
3213 | if (!isConstantOrConstantVector(*SelectLHS, MRI, | |||
3214 | /*AllowFP*/ true, | |||
3215 | /*AllowOpaqueConstants*/ false)) | |||
3216 | return false; | |||
3217 | if (!isConstantOrConstantVector(*SelectRHS, MRI, | |||
3218 | /*AllowFP*/ true, | |||
3219 | /*AllowOpaqueConstants*/ false)) | |||
3220 | return false; | |||
3221 | ||||
3222 | unsigned BinOpcode = MI.getOpcode(); | |||
3223 | ||||
3224 | // We now know one of the operands is a select of constants. Now verify that | |||
3225 | // the other binary operator operand is either a constant, or a variable that | |||
3226 | // we can handle. | |||
3227 | bool CanFoldNonConst = | |||
3228 | (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) && | |||
3229 | (isNullOrNullSplat(*SelectLHS, MRI) || | |||
3230 | isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) && | |||
3231 | (isNullOrNullSplat(*SelectRHS, MRI) || | |||
3232 | isAllOnesOrAllOnesSplat(*SelectRHS, MRI)); | |||
3233 | if (CanFoldNonConst) | |||
3234 | return true; | |||
3235 | ||||
3236 | return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI, | |||
3237 | /*AllowFP*/ true, | |||
3238 | /*AllowOpaqueConstants*/ false); | |||
3239 | } | |||
3240 | ||||
3241 | /// \p SelectOperand is the operand in binary operator \p MI that is the select | |||
3242 | /// to fold. | |||
3243 | bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI, | |||
3244 | const unsigned &SelectOperand) { | |||
3245 | Builder.setInstrAndDebugLoc(MI); | |||
3246 | ||||
3247 | Register Dst = MI.getOperand(0).getReg(); | |||
3248 | Register LHS = MI.getOperand(1).getReg(); | |||
3249 | Register RHS = MI.getOperand(2).getReg(); | |||
3250 | MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg()); | |||
3251 | ||||
3252 | Register SelectCond = Select->getOperand(1).getReg(); | |||
3253 | Register SelectTrue = Select->getOperand(2).getReg(); | |||
3254 | Register SelectFalse = Select->getOperand(3).getReg(); | |||
3255 | ||||
3256 | LLT Ty = MRI.getType(Dst); | |||
3257 | unsigned BinOpcode = MI.getOpcode(); | |||
3258 | ||||
3259 | Register FoldTrue, FoldFalse; | |||
3260 | ||||
3261 | // We have a select-of-constants followed by a binary operator with a | |||
3262 | // constant. Eliminate the binop by pulling the constant math into the select. | |||
3263 | // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO | |||
3264 | if (SelectOperand == 1) { | |||
3265 | // TODO: SelectionDAG verifies this actually constant folds before | |||
3266 | // committing to the combine. | |||
3267 | ||||
3268 | FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0); | |||
3269 | FoldFalse = | |||
3270 | Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0); | |||
3271 | } else { | |||
3272 | FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0); | |||
3273 | FoldFalse = | |||
3274 | Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0); | |||
3275 | } | |||
3276 | ||||
3277 | Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags()); | |||
3278 | MI.eraseFromParent(); | |||
3279 | ||||
3280 | return true; | |||
3281 | } | |||
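
// Worked example (added for exposition): the binop is applied to both arms of
// the select, after which each arm can constant fold:
//
//   %five:_(s32) = G_CONSTANT i32 5
//   %ten:_(s32) = G_CONSTANT i32 10
//   %one:_(s32) = G_CONSTANT i32 1
//   %sel:_(s32) = G_SELECT %cond(s1), %five, %ten
//   %dst:_(s32) = G_ADD %sel, %one
// ==>
//   %t:_(s32) = G_ADD %five, %one   ; folds to 6
//   %f:_(s32) = G_ADD %ten, %one    ; folds to 11
//   %dst:_(s32) = G_SELECT %cond(s1), %t, %f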
3282 | ||||
3283 | std::optional<SmallVector<Register, 8>> | |||
3284 | CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { | |||
3285 | assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!"); | |||
3286 | // We want to detect if Root is part of a tree which represents a bunch | |||
3287 | // of loads being merged into a larger load. We'll try to recognize patterns | |||
3288 | // like, for example: | |||
3289 | // | |||
3290 | // Reg Reg | |||
3291 | // \ / | |||
3292 | // OR_1 Reg | |||
3293 | // \ / | |||
3294 | // OR_2 | |||
3295 | // \ Reg | |||
3296 | // .. / | |||
3297 | // Root | |||
3298 | // | |||
3299 | // Reg Reg Reg Reg | |||
3300 | // \ / \ / | |||
3301 | // OR_1 OR_2 | |||
3302 | // \ / | |||
3303 | // \ / | |||
3304 | // ... | |||
3305 | // Root | |||
3306 | // | |||
3307 | // Each "Reg" may have been produced by a load + some arithmetic. This | |||
3308 | // function will save each of them. | |||
3309 | SmallVector<Register, 8> RegsToVisit; | |||
3310 | SmallVector<const MachineInstr *, 7> Ors = {Root}; | |||
3311 | ||||
3312 | // In the "worst" case, we're dealing with a load for each byte. So, there | |||
3313 | // are at most #bytes - 1 ORs. | |||
3314 | const unsigned MaxIter = | |||
3315 | MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1; | |||
3316 | for (unsigned Iter = 0; Iter < MaxIter; ++Iter) { | |||
3317 | if (Ors.empty()) | |||
3318 | break; | |||
3319 | const MachineInstr *Curr = Ors.pop_back_val(); | |||
3320 | Register OrLHS = Curr->getOperand(1).getReg(); | |||
3321 | Register OrRHS = Curr->getOperand(2).getReg(); | |||
3322 | ||||
3323 | // In the combine, we want to eliminate the entire tree. | |||
3324 | if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS)) | |||
3325 | return std::nullopt; | |||
3326 | ||||
3327 | // If it's a G_OR, save it and continue to walk. If it's not, then it's | |||
3328 | // something that may be a load + arithmetic. | |||
3329 | if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI)) | |||
3330 | Ors.push_back(Or); | |||
3331 | else | |||
3332 | RegsToVisit.push_back(OrLHS); | |||
3333 | if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI)) | |||
3334 | Ors.push_back(Or); | |||
3335 | else | |||
3336 | RegsToVisit.push_back(OrRHS); | |||
3337 | } | |||
3338 | ||||
3339 | // We're going to try and merge each register into a wider power-of-2 type, | |||
3340 | // so we ought to have an even number of registers. | |||
3341 | if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0) | |||
3342 | return std::nullopt; | |||
3343 | return RegsToVisit; | |||
3344 | } | |||
3345 | ||||
3346 | /// Helper function for findLoadOffsetsForLoadOrCombine. | |||
3347 | /// | |||
3348 | /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value, | |||
3349 | /// and then moving that value into a specific byte offset. | |||
3350 | /// | |||
3351 | /// e.g. x[i] << 24 | |||
3352 | /// | |||
3353 | /// \returns The load instruction and the byte offset it is moved into. | |||
3354 | static std::optional<std::pair<GZExtLoad *, int64_t>> | |||
3355 | matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, | |||
3356 | const MachineRegisterInfo &MRI) { | |||
3357 | assert(MRI.hasOneNonDBGUse(Reg) && | |||
3358 |        "Expected Reg to only have one non-debug use?"); | |||
3359 | Register MaybeLoad; | |||
3360 | int64_t Shift; | |||
3361 | if (!mi_match(Reg, MRI, | |||
3362 | m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) { | |||
3363 | Shift = 0; | |||
3364 | MaybeLoad = Reg; | |||
3365 | } | |||
3366 | ||||
3367 | if (Shift % MemSizeInBits != 0) | |||
3368 | return std::nullopt; | |||
3369 | ||||
3370 | // TODO: Handle other types of loads. | |||
3371 | auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI); | |||
3372 | if (!Load) | |||
3373 | return std::nullopt; | |||
3374 | ||||
3375 | if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits) | |||
3376 | return std::nullopt; | |||
3377 | ||||
3378 | return std::make_pair(Load, Shift / MemSizeInBits); | |||
3379 | } | |||
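
// Worked example (added for exposition): with MemSizeInBits == 8, the value
// below matches with Shift == 24, so the returned byte position is
// 24 / 8 == 3; an unshifted load matches with position 0:
//
//   %c24:_(s32) = G_CONSTANT i32 24
//   %v:_(s32) = G_ZEXTLOAD %p(p0) :: (load (s8))
//   %sh:_(s32) = G_SHL %v, %c24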
3380 | ||||
3381 | std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>> | |||
3382 | CombinerHelper::findLoadOffsetsForLoadOrCombine( | |||
3383 | SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, | |||
3384 | const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) { | |||
3385 | ||||
3386 | // Each load found for the pattern. There should be one for each RegsToVisit. | |||
3387 | SmallSetVector<const MachineInstr *, 8> Loads; | |||
3388 | ||||
3389 | // The lowest index used in any load. (The lowest "i" for each x[i].) | |||
3390 | int64_t LowestIdx = INT64_MAX; | |||
3391 | ||||
3392 | // The load which uses the lowest index. | |||
3393 | GZExtLoad *LowestIdxLoad = nullptr; | |||
3394 | ||||
3395 | // Keeps track of the load indices we see. We shouldn't see any indices twice. | |||
3396 | SmallSet<int64_t, 8> SeenIdx; | |||
3397 | ||||
3398 | // Ensure each load is in the same MBB. | |||
3399 | // TODO: Support multiple MachineBasicBlocks. | |||
3400 | MachineBasicBlock *MBB = nullptr; | |||
3401 | const MachineMemOperand *MMO = nullptr; | |||
3402 | ||||
3403 | // Earliest instruction-order load in the pattern. | |||
3404 | GZExtLoad *EarliestLoad = nullptr; | |||
3405 | ||||
3406 | // Latest instruction-order load in the pattern. | |||
3407 | GZExtLoad *LatestLoad = nullptr; | |||
3408 | ||||
3409 | // Base pointer which every load should share. | |||
3410 | Register BasePtr; | |||
3411 | ||||
3412 | // We want to find a load for each register. Each load should have some | |||
3413 | // appropriate bit twiddling arithmetic. During this loop, we will also keep | |||
3414 | // track of the load which uses the lowest index. Later, we will check if we | |||
3415 | // can use its pointer in the final, combined load. | |||
3416 | for (auto Reg : RegsToVisit) { | |||
3417 | // Find the load, and find the position that it will end up at in the | |||
3418 | // final (possibly shifted) value. | |||
3419 | auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI); | |||
3420 | if (!LoadAndPos) | |||
3421 | return std::nullopt; | |||
3422 | GZExtLoad *Load; | |||
3423 | int64_t DstPos; | |||
3424 | std::tie(Load, DstPos) = *LoadAndPos; | |||
3425 | ||||
3426 | // TODO: Handle multiple MachineBasicBlocks. Currently not handled because | |||
3427 | // it is difficult to check for stores/calls/etc between loads. | |||
3428 | MachineBasicBlock *LoadMBB = Load->getParent(); | |||
3429 | if (!MBB) | |||
3430 | MBB = LoadMBB; | |||
3431 | if (LoadMBB != MBB) | |||
3432 | return std::nullopt; | |||
3433 | ||||
3434 | // Make sure that the MachineMemOperands of every seen load are compatible. | |||
3435 | auto &LoadMMO = Load->getMMO(); | |||
3436 | if (!MMO) | |||
3437 | MMO = &LoadMMO; | |||
3438 | if (MMO->getAddrSpace() != LoadMMO.getAddrSpace()) | |||
3439 | return std::nullopt; | |||
3440 | ||||
3441 | // Find out what the base pointer and index for the load is. | |||
3442 | Register LoadPtr; | |||
3443 | int64_t Idx; | |||
3444 | if (!mi_match(Load->getOperand(1).getReg(), MRI, | |||
3445 | m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) { | |||
3446 | LoadPtr = Load->getOperand(1).getReg(); | |||
3447 | Idx = 0; | |||
3448 | } | |||
3449 | ||||
3450 | // Don't combine things like a[i], a[i] -> a bigger load. | |||
3451 | if (!SeenIdx.insert(Idx).second) | |||
3452 | return std::nullopt; | |||
3453 | ||||
3454 | // Every load must share the same base pointer; don't combine things like: | |||
3455 | // | |||
3456 | // a[i], b[i + 1] -> a bigger load. | |||
3457 | if (!BasePtr.isValid()) | |||
3458 | BasePtr = LoadPtr; | |||
3459 | if (BasePtr != LoadPtr) | |||
3460 | return std::nullopt; | |||
3461 | ||||
3462 | if (Idx < LowestIdx) { | |||
3463 | LowestIdx = Idx; | |||
3464 | LowestIdxLoad = Load; | |||
3465 | } | |||
3466 | ||||
3467 | // Keep track of the byte offset that this load ends up at. If we have seen | |||
3468 | // the byte offset, then stop here. We do not want to combine: | |||
3469 | // | |||
3470 | // a[i] << 16, a[i + k] << 16 -> a bigger load. | |||
3471 | if (!MemOffset2Idx.try_emplace(DstPos, Idx).second) | |||
3472 | return std::nullopt; | |||
3473 | Loads.insert(Load); | |||
3474 | ||||
3475 | // Keep track of the position of the earliest/latest loads in the pattern. | |||
3476 | // We will check that there are no load fold barriers between them later | |||
3477 | // on. | |||
3478 | // | |||
3479 | // FIXME: Is there a better way to check for load fold barriers? | |||
3480 | if (!EarliestLoad || dominates(*Load, *EarliestLoad)) | |||
3481 | EarliestLoad = Load; | |||
3482 | if (!LatestLoad || dominates(*LatestLoad, *Load)) | |||
3483 | LatestLoad = Load; | |||
3484 | } | |||
3485 | ||||
3486 | // We found a load for each register. Let's check if each load satisfies the | |||
3487 | // pattern. | |||
3488 | assert(Loads.size() == RegsToVisit.size() && | |||
3489 |        "Expected to find a load for each register?"); | |||
3490 | assert(EarliestLoad != LatestLoad && EarliestLoad && | |||
3491 |        LatestLoad && "Expected at least two loads?"); | |||
3492 | ||||
3493 | // Check if there are any stores, calls, etc. between any of the loads. If | |||
3494 | // there are, then we can't safely perform the combine. | |||
3495 | // | |||
3496 | // MaxIter is chosen based on the (worst case) number of iterations it | |||
3497 | // typically takes to succeed in the LLVM test suite plus some padding. | |||
3498 | // | |||
3499 | // FIXME: Is there a better way to check for load fold barriers? | |||
3500 | const unsigned MaxIter = 20; | |||
3501 | unsigned Iter = 0; | |||
3502 | for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(), | |||
3503 | LatestLoad->getIterator())) { | |||
3504 | if (Loads.count(&MI)) | |||
3505 | continue; | |||
3506 | if (MI.isLoadFoldBarrier()) | |||
3507 | return std::nullopt; | |||
3508 | if (Iter++ == MaxIter) | |||
3509 | return std::nullopt; | |||
3510 | } | |||
3511 | ||||
3512 | return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad); | |||
3513 | } | |||
3514 | ||||
3515 | bool CombinerHelper::matchLoadOrCombine( | |||
3516 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
3517 | assert(MI.getOpcode() == TargetOpcode::G_OR); | |||
3518 | MachineFunction &MF = *MI.getMF(); | |||
3519 | // Assuming a little-endian target, transform: | |||
3520 | // s8 *a = ... | |||
3521 | // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24) | |||
3522 | // => | |||
3523 | // s32 val = *((i32)a) | |||
3524 | // | |||
3525 | // s8 *a = ... | |||
3526 | // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3] | |||
3527 | // => | |||
3528 | // s32 val = BSWAP(*((s32)a)) | |||
3529 | Register Dst = MI.getOperand(0).getReg(); | |||
3530 | LLT Ty = MRI.getType(Dst); | |||
3531 | if (Ty.isVector()) | |||
3532 | return false; | |||
3533 | ||||
3534 | // We need to combine at least two loads into this type. Since the smallest | |||
3535 | // possible load is into a byte, we need at least a 16-bit wide type. | |||
3536 | const unsigned WideMemSizeInBits = Ty.getSizeInBits(); | |||
3537 | if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0) | |||
3538 | return false; | |||
3539 | ||||
3540 | // Match a collection of non-OR instructions in the pattern. | |||
3541 | auto RegsToVisit = findCandidatesForLoadOrCombine(&MI); | |||
3542 | if (!RegsToVisit) | |||
3543 | return false; | |||
3544 | ||||
3545 | // We have a collection of non-OR instructions. Figure out how wide each of | |||
3546 | // the small loads should be, based on the number of potential loads we | |||
3547 | // found. | |||
3548 | const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size(); | |||
3549 | if (NarrowMemSizeInBits % 8 != 0) | |||
3550 | return false; | |||
3551 | ||||
3552 | // Check if each register feeding into each OR is a load from the same | |||
3553 | // base pointer + some arithmetic. | |||
3554 | // | |||
3555 | // e.g. a[0], a[1] << 8, a[2] << 16, etc. | |||
3556 | // | |||
3557 | // Also verify that each of these ends up putting a[i] into the same memory | |||
3558 | // offset as a load into a wide type would. | |||
3559 | SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx; | |||
3560 | GZExtLoad *LowestIdxLoad, *LatestLoad; | |||
3561 | int64_t LowestIdx; | |||
3562 | auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine( | |||
3563 | MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits); | |||
3564 | if (!MaybeLoadInfo) | |||
3565 | return false; | |||
3566 | std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo; | |||
3567 | ||||
3568 | // We have a bunch of loads being OR'd together. Using the addresses + offsets | |||
3569 | // we found before, check if this corresponds to a big or little endian byte | |||
3570 | // pattern. If it does, then we can represent it using a load + possibly a | |||
3571 | // BSWAP. | |||
3572 | bool IsBigEndianTarget = MF.getDataLayout().isBigEndian(); | |||
3573 | std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); | |||
3574 | if (!IsBigEndian) | |||
3575 | return false; | |||
3576 | bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian; | |||
3577 | if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}})) | |||
3578 | return false; | |||
3579 | ||||
3580 | // Make sure that the load from the lowest index produces offset 0 in the | |||
3581 | // final value. | |||
3582 | // | |||
3583 | // This ensures that we won't combine something like this: | |||
3584 | // | |||
3585 | // load x[i] -> byte 2 | |||
3586 | // load x[i+1] -> byte 0 ---> wide_load x[i] | |||
3587 | // load x[i+2] -> byte 1 | |||
3588 | const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits; | |||
3589 | const unsigned ZeroByteOffset = | |||
3590 | *IsBigEndian | |||
3591 | ? bigEndianByteAt(NumLoadsInTy, 0) | |||
3592 | : littleEndianByteAt(NumLoadsInTy, 0); | |||
3593 | auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset); | |||
3594 | if (ZeroOffsetIdx == MemOffset2Idx.end() || | |||
3595 | ZeroOffsetIdx->second != LowestIdx) | |||
3596 | return false; | |||
3597 | ||||
3598 | // We will reuse the pointer from the load which ends up at byte offset 0. It | |||
3599 | // may not use index 0. | |||
3600 | Register Ptr = LowestIdxLoad->getPointerReg(); | |||
3601 | const MachineMemOperand &MMO = LowestIdxLoad->getMMO(); | |||
3602 | LegalityQuery::MemDesc MMDesc(MMO); | |||
3603 | MMDesc.MemoryTy = Ty; | |||
3604 | if (!isLegalOrBeforeLegalizer( | |||
3605 | {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}})) | |||
3606 | return false; | |||
3607 | auto PtrInfo = MMO.getPointerInfo(); | |||
3608 | auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8); | |||
3609 | ||||
3610 | // Load must be allowed and fast on the target. | |||
3611 | LLVMContext &C = MF.getFunction().getContext(); | |||
3612 | auto &DL = MF.getDataLayout(); | |||
3613 | unsigned Fast = 0; | |||
3614 | if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) || | |||
3615 | !Fast) | |||
3616 | return false; | |||
3617 | ||||
3618 | MatchInfo = [=](MachineIRBuilder &MIB) { | |||
3619 | MIB.setInstrAndDebugLoc(*LatestLoad); | |||
3620 | Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst; | |||
3621 | MIB.buildLoad(LoadDst, Ptr, *NewMMO); | |||
3622 | if (NeedsBSwap) | |||
3623 | MIB.buildBSwap(Dst, LoadDst); | |||
3624 | }; | |||
3625 | return true; | |||
3626 | } | |||
3627 | ||||
3628 | /// Check if the store \p Store is a truncstore that can be merged. That is, | |||
3629 | /// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty | |||
3630 | /// Register then it does not need to match and SrcVal is set to the source | |||
3631 | /// value found. | |||
3632 | /// On match, returns the start byte offset of the \p SrcVal that is being | |||
3633 | /// stored. | |||
3634 | static std::optional<int64_t> | |||
3635 | getTruncStoreByteOffset(GStore &Store, Register &SrcVal, | |||
3636 | MachineRegisterInfo &MRI) { | |||
3637 | Register TruncVal; | |||
3638 | if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal)))) | |||
3639 | return std::nullopt; | |||
3640 | ||||
3641 | // The shift amount must be a constant multiple of the narrow type. | |||
3642 | // It is translated to the offset address in the wide source value "y". | |||
3643 | // | |||
3644 | // x = G_LSHR y, ShiftAmtC | |||
3645 | // s8 z = G_TRUNC x | |||
3646 | // store z, ... | |||
3647 | Register FoundSrcVal; | |||
3648 | int64_t ShiftAmt; | |||
3649 | if (!mi_match(TruncVal, MRI, | |||
3650 | m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)), | |||
3651 | m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) { | |||
3652 | if (!SrcVal.isValid() || TruncVal == SrcVal) { | |||
3653 | if (!SrcVal.isValid()) | |||
3654 | SrcVal = TruncVal; | |||
3655 | return 0; // If it's the lowest index store. | |||
3656 | } | |||
3657 | return std::nullopt; | |||
3658 | } | |||
3659 | ||||
3660 | unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits(); | |||
3661 | if (ShiftAmt % NarrowBits != 0) | |||
3662 | return std::nullopt; | |||
3663 | const unsigned Offset = ShiftAmt / NarrowBits; | |||
3664 | ||||
3665 | if (SrcVal.isValid() && FoundSrcVal != SrcVal) | |||
3666 | return std::nullopt; | |||
3667 | ||||
3668 | if (!SrcVal.isValid()) | |||
3669 | SrcVal = FoundSrcVal; | |||
3670 | else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal)) | |||
3671 | return std::nullopt; | |||
3672 | return Offset; | |||
3673 | } | |||
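
// Worked example (added for exposition): an s8 truncstore of "val >> 16"
// stores byte 2 of the wide value, so the returned offset is 16 / 8 == 2:
//
//   %c16:_(s32) = G_CONSTANT i32 16
//   %sh:_(s32) = G_LSHR %val, %c16
//   %t:_(s8) = G_TRUNC %sh
//   G_STORE %t(s8), %p(p0) :: (store (s8))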
3674 | ||||
3675 | /// Match a pattern where a wide type scalar value is stored by several narrow | |||
3676 | /// stores. Fold it into a single store or a BSWAP and a store if the target | |||
3677 | /// supports it. | |||
3678 | /// | |||
3679 | /// Assuming little endian target: | |||
3680 | /// i8 *p = ... | |||
3681 | /// i32 val = ... | |||
3682 | /// p[0] = (val >> 0) & 0xFF; | |||
3683 | /// p[1] = (val >> 8) & 0xFF; | |||
3684 | /// p[2] = (val >> 16) & 0xFF; | |||
3685 | /// p[3] = (val >> 24) & 0xFF; | |||
3686 | /// => | |||
3687 | /// *((i32)p) = val; | |||
3688 | /// | |||
3689 | /// i8 *p = ... | |||
3690 | /// i32 val = ... | |||
3691 | /// p[0] = (val >> 24) & 0xFF; | |||
3692 | /// p[1] = (val >> 16) & 0xFF; | |||
3693 | /// p[2] = (val >> 8) & 0xFF; | |||
3694 | /// p[3] = (val >> 0) & 0xFF; | |||
3695 | /// => | |||
3696 | /// *((i32)p) = BSWAP(val); | |||
3697 | bool CombinerHelper::matchTruncStoreMerge(MachineInstr &MI, | |||
3698 | MergeTruncStoresInfo &MatchInfo) { | |||
3699 | auto &StoreMI = cast<GStore>(MI); | |||
3700 | LLT MemTy = StoreMI.getMMO().getMemoryType(); | |||
3701 | ||||
3702 | // We only handle merging simple stores of 1-4 bytes. | |||
3703 | if (!MemTy.isScalar()) | |||
3704 | return false; | |||
3705 | switch (MemTy.getSizeInBits()) { | |||
3706 | case 8: | |||
3707 | case 16: | |||
3708 | case 32: | |||
3709 | break; | |||
3710 | default: | |||
3711 | return false; | |||
3712 | } | |||
3713 | if (!StoreMI.isSimple()) | |||
3714 | return false; | |||
3715 | ||||
3716 | // We do a simple search for mergeable stores prior to this one. | |||
3717 | // Any potential alias hazard along the way terminates the search. | |||
3718 | SmallVector<GStore *> FoundStores; | |||
3719 | ||||
3720 | // We're looking for: | |||
3721 | // 1) a (store(trunc(...))) | |||
3722 | // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get | |||
3723 | // the partial value stored. | |||
3724 | // 3) where the offsets form either a little or big-endian sequence. | |||
3725 | ||||
3726 | auto &LastStore = StoreMI; | |||
3727 | ||||
3728 | // The single base pointer that all stores must use. | |||
3729 | Register BaseReg; | |||
3730 | int64_t LastOffset; | |||
3731 | if (!mi_match(LastStore.getPointerReg(), MRI, | |||
3732 | m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) { | |||
3733 | BaseReg = LastStore.getPointerReg(); | |||
3734 | LastOffset = 0; | |||
3735 | } | |||
3736 | ||||
3737 | GStore *LowestIdxStore = &LastStore; | |||
3738 | int64_t LowestIdxOffset = LastOffset; | |||
3739 | ||||
3740 | Register WideSrcVal; | |||
3741 | auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, MRI); | |||
3742 | if (!LowestShiftAmt) | |||
3743 | return false; // Didn't match a trunc. | |||
3744 | assert(WideSrcVal.isValid()); | |||
3745 | ||||
3746 | LLT WideStoreTy = MRI.getType(WideSrcVal); | |||
3747 | // The wide type might not be a multiple of the memory type, e.g. s48 and s32. | |||
3748 | if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0) | |||
3749 | return false; | |||
3750 | const unsigned NumStoresRequired = | |||
3751 | WideStoreTy.getSizeInBits() / MemTy.getSizeInBits(); | |||
3752 | ||||
3753 | SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX); | |||
3754 | OffsetMap[*LowestShiftAmt] = LastOffset; | |||
3755 | FoundStores.emplace_back(&LastStore); | |||
3756 | ||||
3757 | // Search the block up for more stores. | |||
3758 | // We use a search threshold of 10 instructions here because the combiner | |||
3759 | // works top-down within a block, and we don't want to search an unbounded | |||
3760 | // number of predecessor instructions trying to find matching stores. | |||
3761 | // If we moved this optimization into a separate pass then we could probably | |||
3762 | // use a more efficient search without having a hard-coded threshold. | |||
3763 | const int MaxInstsToCheck = 10; | |||
3764 | int NumInstsChecked = 0; | |||
3765 | for (auto II = ++LastStore.getReverseIterator(); | |||
3766 | II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck; | |||
3767 | ++II) { | |||
3768 | NumInstsChecked++; | |||
3769 | GStore *NewStore; | |||
3770 | if ((NewStore = dyn_cast<GStore>(&*II))) { | |||
3771 | if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple()) | |||
3772 | break; | |||
3773 | } else if (II->isLoadFoldBarrier() || II->mayLoad()) { | |||
3774 | break; | |||
3775 | } else { | |||
3776 | continue; // This is a safe instruction we can look past. | |||
3777 | } | |||
3778 | ||||
3779 | Register NewBaseReg; | |||
3780 | int64_t MemOffset; | |||
3781 | // Check we're storing to the same base + some offset. | |||
3782 | if (!mi_match(NewStore->getPointerReg(), MRI, | |||
3783 | m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) { | |||
3784 | NewBaseReg = NewStore->getPointerReg(); | |||
3785 | MemOffset = 0; | |||
3786 | } | |||
3787 | if (BaseReg != NewBaseReg) | |||
3788 | break; | |||
3789 | ||||
3790 | auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, MRI); | |||
3791 | if (!ShiftByteOffset) | |||
3792 | break; | |||
3793 | if (MemOffset < LowestIdxOffset) { | |||
3794 | LowestIdxOffset = MemOffset; | |||
3795 | LowestIdxStore = NewStore; | |||
3796 | } | |||
3797 | ||||
3798 | // Map the offset in the store and the offset in the combined value, and | |||
3799 | // early return if it has been set before. | |||
3800 | if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired || | |||
3801 | OffsetMap[*ShiftByteOffset] != INT64_MAX) | |||
3802 | break; | |||
3803 | OffsetMap[*ShiftByteOffset] = MemOffset; | |||
3804 | ||||
3805 | FoundStores.emplace_back(NewStore); | |||
3806 | // Reset counter since we've found a matching inst. | |||
3807 | NumInstsChecked = 0; | |||
3808 | if (FoundStores.size() == NumStoresRequired) | |||
3809 | break; | |||
3810 | } | |||
3811 | ||||
3812 | if (FoundStores.size() != NumStoresRequired) { | |||
3813 | return false; | |||
3814 | } | |||
3815 | ||||
3816 | const auto &DL = LastStore.getMF()->getDataLayout(); | |||
3817 | auto &C = LastStore.getMF()->getFunction().getContext(); | |||
3818 | // Check that a store of the wide type is both allowed and fast on the target | |||
3819 | unsigned Fast = 0; | |||
3820 | bool Allowed = getTargetLowering().allowsMemoryAccess( | |||
3821 | C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast); | |||
3822 | if (!Allowed || !Fast) | |||
3823 | return false; | |||
3824 | ||||
3825 | // Check if the pieces of the value are going to the expected places in memory | |||
3826 | // to merge the stores. | |||
3827 | unsigned NarrowBits = MemTy.getScalarSizeInBits(); | |||
3828 | auto checkOffsets = [&](bool MatchLittleEndian) { | |||
3829 | if (MatchLittleEndian) { | |||
3830 | for (unsigned i = 0; i != NumStoresRequired; ++i) | |||
3831 | if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset) | |||
3832 | return false; | |||
3833 | } else { // MatchBigEndian by reversing loop counter. | |||
3834 | for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired; | |||
3835 | ++i, --j) | |||
3836 | if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset) | |||
3837 | return false; | |||
3838 | } | |||
3839 | return true; | |||
3840 | }; | |||
3841 | ||||
3842 | // Check if the offsets line up for the native data layout of this target. | |||
3843 | bool NeedBswap = false; | |||
3844 | bool NeedRotate = false; | |||
3845 | if (!checkOffsets(DL.isLittleEndian())) { | |||
3846 | // Special-case: check if byte offsets line up for the opposite endian. | |||
3847 | if (NarrowBits == 8 && checkOffsets(DL.isBigEndian())) | |||
3848 | NeedBswap = true; | |||
3849 | else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian())) | |||
3850 | NeedRotate = true; | |||
3851 | else | |||
3852 | return false; | |||
3853 | } | |||
3854 | ||||
3855 | if (NeedBswap && | |||
3856 | !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}})) | |||
3857 | return false; | |||
3858 | if (NeedRotate && | |||
3859 | !isLegalOrBeforeLegalizer({TargetOpcode::G_ROTR, {WideStoreTy}})) | |||
3860 | return false; | |||
3861 | ||||
3862 | MatchInfo.NeedBSwap = NeedBswap; | |||
3863 | MatchInfo.NeedRotate = NeedRotate; | |||
3864 | MatchInfo.LowestIdxStore = LowestIdxStore; | |||
3865 | MatchInfo.WideSrcVal = WideSrcVal; | |||
3866 | MatchInfo.FoundStores = std::move(FoundStores); | |||
3867 | return true; | |||
3868 | } | |||
3869 | ||||
3870 | void CombinerHelper::applyTruncStoreMerge(MachineInstr &MI, | |||
3871 | MergeTruncStoresInfo &MatchInfo) { | |||
3872 | ||||
3873 | Builder.setInstrAndDebugLoc(MI); | |||
3874 | Register WideSrcVal = MatchInfo.WideSrcVal; | |||
3875 | LLT WideStoreTy = MRI.getType(WideSrcVal); | |||
3876 | ||||
3877 | if (MatchInfo.NeedBSwap) { | |||
3878 | WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0); | |||
3879 | } else if (MatchInfo.NeedRotate) { | |||
3880 | assert(WideStoreTy.getSizeInBits() % 2 == 0 && | |||
3881 |        "Unexpected type for rotate"); | |||
3882 | auto RotAmt = | |||
3883 | Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2); | |||
3884 | WideSrcVal = | |||
3885 | Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0); | |||
3886 | } | |||
3887 | ||||
3888 | Builder.buildStore(WideSrcVal, MatchInfo.LowestIdxStore->getPointerReg(), | |||
3889 | MatchInfo.LowestIdxStore->getMMO().getPointerInfo(), | |||
3890 | MatchInfo.LowestIdxStore->getMMO().getAlign()); | |||
3891 | ||||
3892 | // Erase the old stores. | |||
3893 | for (auto *ST : MatchInfo.FoundStores) | |||
3894 | ST->eraseFromParent(); | |||
3895 | } | |||
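
// Illustrative example (added for exposition): on a little-endian target, two
// s16 truncstores that place the halves of an s32 in big-endian order hit the
// rotate path above, since rotating an s32 right by 16 swaps its two halves:
//
//   p[0..1] = val >> 16;  p[2..3] = val & 0xffff
// ==>
//   %c16:_(s32) = G_CONSTANT i32 16
//   %rot:_(s32) = G_ROTR %val, %c16
//   G_STORE %rot(s32), %p(p0) :: (store (s32))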
3896 | ||||
3897 | bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI, | |||
3898 | MachineInstr *&ExtMI) { | |||
3899 | assert(MI.getOpcode() == TargetOpcode::G_PHI); | |||
3900 | ||||
3901 | Register DstReg = MI.getOperand(0).getReg(); | |||
3902 | ||||
3903 | // TODO: Extending a vector may be expensive, don't do this until heuristics | |||
3904 | // are better. | |||
3905 | if (MRI.getType(DstReg).isVector()) | |||
3906 | return false; | |||
3907 | ||||
3908 | // Try to match a phi whose only use is an extend. | |||
3909 | if (!MRI.hasOneNonDBGUse(DstReg)) | |||
3910 | return false; | |||
3911 | ExtMI = &*MRI.use_instr_nodbg_begin(DstReg); | |||
3912 | switch (ExtMI->getOpcode()) { | |||
3913 | case TargetOpcode::G_ANYEXT: | |||
3914 | return true; // G_ANYEXT is usually free. | |||
3915 | case TargetOpcode::G_ZEXT: | |||
3916 | case TargetOpcode::G_SEXT: | |||
3917 | break; | |||
3918 | default: | |||
3919 | return false; | |||
3920 | } | |||
3921 | ||||
3922 | // If the target is likely to fold this extend away, don't propagate. | |||
3923 | if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI)) | |||
3924 | return false; | |||
3925 | ||||
3926 | // We don't want to propagate the extends unless there's a good chance that | |||
3927 | // they'll be optimized in some way. | |||
3928 | // Collect the unique incoming values. | |||
3929 | SmallPtrSet<MachineInstr *, 4> InSrcs; | |||
3930 | for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) { | |||
3931 | auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI); | |||
3932 | switch (DefMI->getOpcode()) { | |||
3933 | case TargetOpcode::G_LOAD: | |||
3934 | case TargetOpcode::G_TRUNC: | |||
3935 | case TargetOpcode::G_SEXT: | |||
3936 | case TargetOpcode::G_ZEXT: | |||
3937 | case TargetOpcode::G_ANYEXT: | |||
3938 | case TargetOpcode::G_CONSTANT: | |||
3939 | InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI)); | |||
3940 | // Don't try to propagate if there are too many places to create new | |||
3941 | // extends, chances are it'll increase code size. | |||
3942 | if (InSrcs.size() > 2) | |||
3943 | return false; | |||
3944 | break; | |||
3945 | default: | |||
3946 | return false; | |||
3947 | } | |||
3948 | } | |||
3949 | return true; | |||
3950 | } | |||
3951 | ||||
3952 | void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI, | |||
3953 | MachineInstr *&ExtMI) { | |||
3954 | assert(MI.getOpcode() == TargetOpcode::G_PHI); | |||
3955 | Register DstReg = ExtMI->getOperand(0).getReg(); | |||
3956 | LLT ExtTy = MRI.getType(DstReg); | |||
3957 | ||||
3958 | // Propagate the extension into each incoming reg's defining block. | |||
3959 | // Use a SetVector here because PHIs can have duplicate edges, and we want | |||
3960 | // deterministic iteration order. | |||
3961 | SmallSetVector<MachineInstr *, 8> SrcMIs; | |||
3962 | SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap; | |||
3963 | for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) { | |||
3964 | auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg()); | |||
3965 | if (!SrcMIs.insert(SrcMI)) | |||
3966 | continue; | |||
3967 | ||||
3968 | // Build an extend after each src inst. | |||
3969 | auto *MBB = SrcMI->getParent(); | |||
3970 | MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator(); | |||
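| // PHIs must stay grouped at the top of a block, so if the instruction | |||
| // after SrcMI is another PHI, skip past the PHI group before inserting. | |||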
3971 | if (InsertPt != MBB->end() && InsertPt->isPHI()) | |||
3972 | InsertPt = MBB->getFirstNonPHI(); | |||
3973 | ||||
3974 | Builder.setInsertPt(*SrcMI->getParent(), InsertPt); | |||
3975 | Builder.setDebugLoc(MI.getDebugLoc()); | |||
3976 | auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy, | |||
3977 | SrcMI->getOperand(0).getReg()); | |||
3978 | OldToNewSrcMap[SrcMI] = NewExt; | |||
3979 | } | |||
3980 | ||||
3981 | // Create a new phi with the extended inputs. | |||
3982 | Builder.setInstrAndDebugLoc(MI); | |||
3983 | auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI); | |||
3984 | NewPhi.addDef(DstReg); | |||
3985 | for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) { | |||
3986 | if (!MO.isReg()) { | |||
3987 | NewPhi.addMBB(MO.getMBB()); | |||
3988 | continue; | |||
3989 | } | |||
3990 | auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())]; | |||
3991 | NewPhi.addUse(NewSrc->getOperand(0).getReg()); | |||
3992 | } | |||
3993 | Builder.insertInstr(NewPhi); | |||
3994 | ExtMI->eraseFromParent(); | |||
3995 | } | |||
3996 | ||||
3997 | bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI, | |||
3998 | Register &Reg) { | |||
3999 | assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT); | |||
4000 | // If we have a constant index, look for a G_BUILD_VECTOR source | |||
4001 | // and find the source register that the index maps to. | |||
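| // e.g. (G_EXTRACT_VECTOR_ELT (G_BUILD_VECTOR %a, %b, %c, %d), 2) -> %c. | |||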
4002 | Register SrcVec = MI.getOperand(1).getReg(); | |||
4003 | LLT SrcTy = MRI.getType(SrcVec); | |||
4004 | ||||
4005 | auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); | |||
4006 | if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements()) | |||
4007 | return false; | |||
4008 | ||||
4009 | unsigned VecIdx = Cst->Value.getZExtValue(); | |||
4010 | ||||
4011 | // Check if we have a build_vector or build_vector_trunc with an optional | |||
4012 | // trunc in front. | |||
4013 | MachineInstr *SrcVecMI = MRI.getVRegDef(SrcVec); | |||
4014 | if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) { | |||
4015 | SrcVecMI = MRI.getVRegDef(SrcVecMI->getOperand(1).getReg()); | |||
4016 | } | |||
4017 | ||||
4018 | if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR && | |||
4019 | SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC) | |||
4020 | return false; | |||
4021 | ||||
4022 | EVT Ty(getMVTForLLT(SrcTy)); | |||
4023 | if (!MRI.hasOneNonDBGUse(SrcVec) && | |||
4024 | !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty)) | |||
4025 | return false; | |||
4026 | ||||
4027 | Reg = SrcVecMI->getOperand(VecIdx + 1).getReg(); | |||
4028 | return true; | |||
4029 | } | |||
4030 | ||||
4031 | void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI, | |||
4032 | Register &Reg) { | |||
4033 | // Check the type of the register, since it may have come from a | |||
4034 | // G_BUILD_VECTOR_TRUNC. | |||
4035 | LLT ScalarTy = MRI.getType(Reg); | |||
4036 | Register DstReg = MI.getOperand(0).getReg(); | |||
4037 | LLT DstTy = MRI.getType(DstReg); | |||
4038 | ||||
4039 | Builder.setInstrAndDebugLoc(MI); | |||
4040 | if (ScalarTy != DstTy) { | |||
4041 | assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits()); | |||
4042 | Builder.buildTrunc(DstReg, Reg); | |||
4043 | MI.eraseFromParent(); | |||
4044 | return; | |||
4045 | } | |||
4046 | replaceSingleDefInstWithReg(MI, Reg); | |||
4047 | } | |||
4048 | ||||
4049 | bool CombinerHelper::matchExtractAllEltsFromBuildVector( | |||
4050 | MachineInstr &MI, | |||
4051 | SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) { | |||
4052 | assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR); | |||
4053 | // This combine tries to find build_vector's which have every source element | |||
4054 | // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like | |||
4055 | // the masked load scalarization are run late in the pipeline. There's already | |||
4056 | // a combine for a similar pattern starting from the extract, but that | |||
4057 | // doesn't attempt to do it if there are multiple uses of the build_vector, | |||
4058 | // which in this case is true. Starting the combine from the build_vector | |||
4059 | // feels more natural than trying to find sibling nodes of extracts. | |||
4060 | // E.g. | |||
4061 | // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4 | |||
4062 | // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0 | |||
4063 | // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1 | |||
4064 | // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2 | |||
4065 | // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3 | |||
4066 | // ==> | |||
4067 | // replace ext{1,2,3,4} with %s{1,2,3,4} | |||
4068 | ||||
4069 | Register DstReg = MI.getOperand(0).getReg(); | |||
4070 | LLT DstTy = MRI.getType(DstReg); | |||
4071 | unsigned NumElts = DstTy.getNumElements(); | |||
4072 | ||||
4073 | SmallBitVector ExtractedElts(NumElts); | |||
4074 | for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) { | |||
4075 | if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT) | |||
4076 | return false; | |||
4077 | auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI); | |||
4078 | if (!Cst) | |||
4079 | return false; | |||
4080 | unsigned Idx = Cst->getZExtValue(); | |||
4081 | if (Idx >= NumElts) | |||
4082 | return false; // Out of range. | |||
4083 | ExtractedElts.set(Idx); | |||
4084 | SrcDstPairs.emplace_back( | |||
4085 | std::make_pair(MI.getOperand(Idx + 1).getReg(), &II)); | |||
4086 | } | |||
4087 | // Match if every element was extracted. | |||
4088 | return ExtractedElts.all(); | |||
4089 | } | |||
4090 | ||||
4091 | void CombinerHelper::applyExtractAllEltsFromBuildVector( | |||
4092 | MachineInstr &MI, | |||
4093 | SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) { | |||
4094 | assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR); | |||
4095 | for (auto &Pair : SrcDstPairs) { | |||
4096 | auto *ExtMI = Pair.second; | |||
4097 | replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first); | |||
4098 | ExtMI->eraseFromParent(); | |||
4099 | } | |||
4100 | MI.eraseFromParent(); | |||
4101 | } | |||
4102 | ||||
4103 | void CombinerHelper::applyBuildFn( | |||
4104 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4105 | Builder.setInstrAndDebugLoc(MI); | |||
4106 | MatchInfo(Builder); | |||
4107 | MI.eraseFromParent(); | |||
4108 | } | |||
4109 | ||||
4110 | void CombinerHelper::applyBuildFnNoErase( | |||
4111 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4112 | Builder.setInstrAndDebugLoc(MI); | |||
4113 | MatchInfo(Builder); | |||
4114 | } | |||
4115 | ||||
4116 | bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI, | |||
4117 | BuildFnTy &MatchInfo) { | |||
4118 | assert(MI.getOpcode() == TargetOpcode::G_OR); | |||
4119 | ||||
4120 | Register Dst = MI.getOperand(0).getReg(); | |||
4121 | LLT Ty = MRI.getType(Dst); | |||
4122 | unsigned BitWidth = Ty.getScalarSizeInBits(); | |||
4123 | ||||
4124 | Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt; | |||
4125 | unsigned FshOpc = 0; | |||
4126 | ||||
4127 | // Match (or (shl ...), (lshr ...)). | |||
4128 | if (!mi_match(Dst, MRI, | |||
4129 | // m_GOr() handles the commuted version as well. | |||
4130 | m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)), | |||
4131 | m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt))))) | |||
4132 | return false; | |||
4133 | ||||
4134 | // Given constants C0 and C1 such that C0 + C1 is bit-width: | |||
4135 | // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1) | |||
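| // e.g. for s32: (or (shl x, 8), (lshr y, 24)) -> (fshr x, y, 24), since | |||
| // 8 + 24 == 32. | |||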
4136 | int64_t CstShlAmt, CstLShrAmt; | |||
4137 | if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) && | |||
4138 | mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) && | |||
4139 | CstShlAmt + CstLShrAmt == BitWidth) { | |||
4140 | FshOpc = TargetOpcode::G_FSHR; | |||
4141 | Amt = LShrAmt; | |||
4142 | ||||
4143 | } else if (mi_match(LShrAmt, MRI, | |||
4144 | m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && | |||
4145 | ShlAmt == Amt) { | |||
4146 | // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt) | |||
4147 | FshOpc = TargetOpcode::G_FSHL; | |||
4148 | ||||
4149 | } else if (mi_match(ShlAmt, MRI, | |||
4150 | m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && | |||
4151 | LShrAmt == Amt) { | |||
4152 | // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt) | |||
4153 | FshOpc = TargetOpcode::G_FSHR; | |||
4154 | ||||
4155 | } else { | |||
4156 | return false; | |||
4157 | } | |||
4158 | ||||
4159 | LLT AmtTy = MRI.getType(Amt); | |||
4160 | if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}})) | |||
4161 | return false; | |||
4162 | ||||
4163 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4164 | B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt}); | |||
4165 | }; | |||
4166 | return true; | |||
4167 | } | |||
4168 | ||||
4169 | /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate. | |||
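| /// e.g. G_FSHL %x, %x, %amt is a rotate-left of %x by %amt, since both | |||
| /// funnel-shift inputs are the same register. | |||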
4170 | bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) { | |||
4171 | unsigned Opc = MI.getOpcode(); | |||
4172 | assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR); | |||
4173 | Register X = MI.getOperand(1).getReg(); | |||
4174 | Register Y = MI.getOperand(2).getReg(); | |||
4175 | if (X != Y) | |||
4176 | return false; | |||
4177 | unsigned RotateOpc = | |||
4178 | Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR; | |||
4179 | return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}}); | |||
4180 | } | |||
4181 | ||||
4182 | void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) { | |||
4183 | unsigned Opc = MI.getOpcode(); | |||
4184 | assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR); | |||
4185 | bool IsFSHL = Opc == TargetOpcode::G_FSHL; | |||
4186 | Observer.changingInstr(MI); | |||
4187 | MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL | |||
4188 | : TargetOpcode::G_ROTR)); | |||
4189 | MI.removeOperand(2); | |||
4190 | Observer.changedInstr(MI); | |||
4191 | } | |||
4192 | ||||
4193 | // Fold (rot x, c) -> (rot x, c % BitSize) | |||
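| // e.g. rotating an s32 by 40 is equivalent to rotating it by 40 % 32 = 8, | |||
| // so an out-of-range amount can be reduced with a G_UREM by the bit size. | |||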
4194 | bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) { | |||
4195 | assert(MI.getOpcode() == TargetOpcode::G_ROTL || | |||
4196 | MI.getOpcode() == TargetOpcode::G_ROTR); | |||
4197 | unsigned Bitsize = | |||
4198 | MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits(); | |||
4199 | Register AmtReg = MI.getOperand(2).getReg(); | |||
4200 | bool OutOfRange = false; | |||
4201 | auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) { | |||
4202 | if (auto *CI = dyn_cast<ConstantInt>(C)) | |||
4203 | OutOfRange |= CI->getValue().uge(Bitsize); | |||
4204 | return true; | |||
4205 | }; | |||
4206 | return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange; | |||
4207 | } | |||
4208 | ||||
4209 | void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) { | |||
4210 | assert(MI.getOpcode() == TargetOpcode::G_ROTL || | |||
4211 | MI.getOpcode() == TargetOpcode::G_ROTR); | |||
4212 | unsigned Bitsize = | |||
4213 | MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits(); | |||
4214 | Builder.setInstrAndDebugLoc(MI); | |||
4215 | Register Amt = MI.getOperand(2).getReg(); | |||
4216 | LLT AmtTy = MRI.getType(Amt); | |||
4217 | auto Bits = Builder.buildConstant(AmtTy, Bitsize); | |||
4218 | Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0); | |||
4219 | Observer.changingInstr(MI); | |||
4220 | MI.getOperand(2).setReg(Amt); | |||
4221 | Observer.changedInstr(MI); | |||
4222 | } | |||
4223 | ||||
4224 | bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI, | |||
4225 | int64_t &MatchInfo) { | |||
4226 | assert(MI.getOpcode() == TargetOpcode::G_ICMP); | |||
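| // If the known bits of both operands are enough to decide the predicate, | |||
| // fold the compare to the target's "true" value or to 0; e.g. a ult is | |||
| // known true whenever the LHS's maximum value is below the RHS's minimum. | |||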
4227 | auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); | |||
4228 | auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg()); | |||
4229 | auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg()); | |||
4230 | std::optional<bool> KnownVal; | |||
4231 | switch (Pred) { | |||
4232 | default: | |||
4233 | llvm_unreachable("Unexpected G_ICMP predicate?"); | |||
4234 | case CmpInst::ICMP_EQ: | |||
4235 | KnownVal = KnownBits::eq(KnownLHS, KnownRHS); | |||
4236 | break; | |||
4237 | case CmpInst::ICMP_NE: | |||
4238 | KnownVal = KnownBits::ne(KnownLHS, KnownRHS); | |||
4239 | break; | |||
4240 | case CmpInst::ICMP_SGE: | |||
4241 | KnownVal = KnownBits::sge(KnownLHS, KnownRHS); | |||
4242 | break; | |||
4243 | case CmpInst::ICMP_SGT: | |||
4244 | KnownVal = KnownBits::sgt(KnownLHS, KnownRHS); | |||
4245 | break; | |||
4246 | case CmpInst::ICMP_SLE: | |||
4247 | KnownVal = KnownBits::sle(KnownLHS, KnownRHS); | |||
4248 | break; | |||
4249 | case CmpInst::ICMP_SLT: | |||
4250 | KnownVal = KnownBits::slt(KnownLHS, KnownRHS); | |||
4251 | break; | |||
4252 | case CmpInst::ICMP_UGE: | |||
4253 | KnownVal = KnownBits::uge(KnownLHS, KnownRHS); | |||
4254 | break; | |||
4255 | case CmpInst::ICMP_UGT: | |||
4256 | KnownVal = KnownBits::ugt(KnownLHS, KnownRHS); | |||
4257 | break; | |||
4258 | case CmpInst::ICMP_ULE: | |||
4259 | KnownVal = KnownBits::ule(KnownLHS, KnownRHS); | |||
4260 | break; | |||
4261 | case CmpInst::ICMP_ULT: | |||
4262 | KnownVal = KnownBits::ult(KnownLHS, KnownRHS); | |||
4263 | break; | |||
4264 | } | |||
4265 | if (!KnownVal) | |||
4266 | return false; | |||
4267 | MatchInfo = | |||
4268 | *KnownVal | |||
4269 | ? getICmpTrueVal(getTargetLowering(), | |||
4270 | /*IsVector = */ | |||
4271 | MRI.getType(MI.getOperand(0).getReg()).isVector(), | |||
4272 | /* IsFP = */ false) | |||
4273 | : 0; | |||
4274 | return true; | |||
4275 | } | |||
4276 | ||||
4277 | bool CombinerHelper::matchICmpToLHSKnownBits( | |||
4278 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4279 | assert(MI.getOpcode() == TargetOpcode::G_ICMP); | |||
4280 | // Given: | |||
4281 | // | |||
4282 | // %x = G_WHATEVER (... x is known to be 0 or 1 ...) | |||
4283 | // %cmp = G_ICMP ne %x, 0 | |||
4284 | // | |||
4285 | // Or: | |||
4286 | // | |||
4287 | // %x = G_WHATEVER (... x is known to be 0 or 1 ...) | |||
4288 | // %cmp = G_ICMP eq %x, 1 | |||
4289 | // | |||
4290 | // We can replace %cmp with %x assuming true is 1 on the target. | |||
4291 | auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); | |||
4292 | if (!CmpInst::isEquality(Pred)) | |||
4293 | return false; | |||
4294 | Register Dst = MI.getOperand(0).getReg(); | |||
4295 | LLT DstTy = MRI.getType(Dst); | |||
4296 | if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(), | |||
4297 | /* IsFP = */ false) != 1) | |||
4298 | return false; | |||
4299 | int64_t OneOrZero = Pred == CmpInst::ICMP_EQ; | |||
4300 | if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero))) | |||
4301 | return false; | |||
4302 | Register LHS = MI.getOperand(2).getReg(); | |||
4303 | auto KnownLHS = KB->getKnownBits(LHS); | |||
4304 | if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1) | |||
4305 | return false; | |||
4306 | // Make sure replacing Dst with the LHS is a legal operation. | |||
4307 | LLT LHSTy = MRI.getType(LHS); | |||
4308 | unsigned LHSSize = LHSTy.getSizeInBits(); | |||
4309 | unsigned DstSize = DstTy.getSizeInBits(); | |||
4310 | unsigned Op = TargetOpcode::COPY; | |||
4311 | if (DstSize != LHSSize) | |||
4312 | Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT; | |||
4313 | if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}})) | |||
4314 | return false; | |||
4315 | MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); }; | |||
4316 | return true; | |||
4317 | } | |||
4318 | ||||
4319 | // Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0 | |||
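| // e.g. (and (or x, 0xF0), 0x0F) -> (and x, 0x0F): the OR only sets bits | |||
| // that the AND mask clears again. | |||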
4320 | bool CombinerHelper::matchAndOrDisjointMask( | |||
4321 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4322 | assert(MI.getOpcode() == TargetOpcode::G_AND); | |||
4323 | ||||
4324 | // Ignore vector types to simplify matching the two constants. | |||
4325 | // TODO: do this for vectors and scalars via a demanded bits analysis. | |||
4326 | LLT Ty = MRI.getType(MI.getOperand(0).getReg()); | |||
4327 | if (Ty.isVector()) | |||
4328 | return false; | |||
4329 | ||||
4330 | Register Src; | |||
4331 | Register AndMaskReg; | |||
4332 | int64_t AndMaskBits; | |||
4333 | int64_t OrMaskBits; | |||
4334 | if (!mi_match(MI, MRI, | |||
4335 | m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)), | |||
4336 | m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg))))) | |||
4337 | return false; | |||
4338 | ||||
4339 | // Check if OrMask could turn on any bits in Src. | |||
4340 | if (AndMaskBits & OrMaskBits) | |||
4341 | return false; | |||
4342 | ||||
4343 | MatchInfo = [=, &MI](MachineIRBuilder &B) { | |||
4344 | Observer.changingInstr(MI); | |||
4345 | // Canonicalize the result to have the constant on the RHS. | |||
4346 | if (MI.getOperand(1).getReg() == AndMaskReg) | |||
4347 | MI.getOperand(2).setReg(AndMaskReg); | |||
4348 | MI.getOperand(1).setReg(Src); | |||
4349 | Observer.changedInstr(MI); | |||
4350 | }; | |||
4351 | return true; | |||
4352 | } | |||
4353 | ||||
4354 | /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift. | |||
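| /// e.g. (sext_inreg (lshr x, 4), 8) sign-extends bits [4, 12) of x, | |||
| /// i.e. G_SBFX x, 4, 8. | |||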
4355 | bool CombinerHelper::matchBitfieldExtractFromSExtInReg( | |||
4356 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4357 | assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); | |||
4358 | Register Dst = MI.getOperand(0).getReg(); | |||
4359 | Register Src = MI.getOperand(1).getReg(); | |||
4360 | LLT Ty = MRI.getType(Src); | |||
4361 | LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); | |||
4362 | if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}})) | |||
4363 | return false; | |||
4364 | int64_t Width = MI.getOperand(2).getImm(); | |||
4365 | Register ShiftSrc; | |||
4366 | int64_t ShiftImm; | |||
4367 | if (!mi_match( | |||
4368 | Src, MRI, | |||
4369 | m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)), | |||
4370 | m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)))))) | |||
4371 | return false; | |||
4372 | if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits()) | |||
4373 | return false; | |||
4374 | ||||
4375 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4376 | auto Cst1 = B.buildConstant(ExtractTy, ShiftImm); | |||
4377 | auto Cst2 = B.buildConstant(ExtractTy, Width); | |||
4378 | B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2); | |||
4379 | }; | |||
4380 | return true; | |||
4381 | } | |||
4382 | ||||
4383 | /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants. | |||
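| /// e.g. (and (lshr x, 4), 0xFF) zero-extends bits [4, 12) of x, | |||
| /// i.e. G_UBFX x, 4, 8. | |||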
4384 | bool CombinerHelper::matchBitfieldExtractFromAnd( | |||
4385 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4386 | assert(MI.getOpcode() == TargetOpcode::G_AND); | |||
4387 | Register Dst = MI.getOperand(0).getReg(); | |||
4388 | LLT Ty = MRI.getType(Dst); | |||
4389 | LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); | |||
4390 | if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal( | |||
4391 | TargetOpcode::G_UBFX, Ty, ExtractTy)) | |||
4392 | return false; | |||
4393 | ||||
4394 | int64_t AndImm, LSBImm; | |||
4395 | Register ShiftSrc; | |||
4396 | const unsigned Size = Ty.getScalarSizeInBits(); | |||
4397 | if (!mi_match(MI.getOperand(0).getReg(), MRI, | |||
4398 | m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))), | |||
4399 | m_ICst(AndImm)))) | |||
4400 | return false; | |||
4401 | ||||
4402 | // The mask is a mask of the low bits iff imm & (imm+1) == 0. | |||
4403 | auto MaybeMask = static_cast<uint64_t>(AndImm); | |||
4404 | if (MaybeMask & (MaybeMask + 1)) | |||
4405 | return false; | |||
4406 | ||||
4407 | // LSB must fit within the register. | |||
4408 | if (static_cast<uint64_t>(LSBImm) >= Size) | |||
4409 | return false; | |||
4410 | ||||
4411 | uint64_t Width = APInt(Size, AndImm).countr_one(); | |||
4412 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4413 | auto WidthCst = B.buildConstant(ExtractTy, Width); | |||
4414 | auto LSBCst = B.buildConstant(ExtractTy, LSBImm); | |||
4415 | B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst}); | |||
4416 | }; | |||
4417 | return true; | |||
4418 | } | |||
4419 | ||||
4420 | bool CombinerHelper::matchBitfieldExtractFromShr( | |||
4421 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4422 | const unsigned Opcode = MI.getOpcode(); | |||
4423 | assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR); | |||
4424 | ||||
4425 | const Register Dst = MI.getOperand(0).getReg(); | |||
4426 | ||||
4427 | const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR | |||
4428 | ? TargetOpcode::G_SBFX | |||
4429 | : TargetOpcode::G_UBFX; | |||
4430 | ||||
4431 | // Check if the type we would use for the extract is legal | |||
4432 | LLT Ty = MRI.getType(Dst); | |||
4433 | LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); | |||
4434 | if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}})) | |||
4435 | return false; | |||
4436 | ||||
4437 | Register ShlSrc; | |||
4438 | int64_t ShrAmt; | |||
4439 | int64_t ShlAmt; | |||
4440 | const unsigned Size = Ty.getScalarSizeInBits(); | |||
4441 | ||||
4442 | // Try to match shr (shl x, c1), c2 | |||
4443 | if (!mi_match(Dst, MRI, | |||
4444 | m_BinOp(Opcode, | |||
4445 | m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))), | |||
4446 | m_ICst(ShrAmt)))) | |||
4447 | return false; | |||
4448 | ||||
4449 | // Make sure that the shift sizes can fit a bitfield extract | |||
4450 | if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size) | |||
4451 | return false; | |||
4452 | ||||
4453 | // Skip this combine if the G_SEXT_INREG combine could handle it | |||
4454 | if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt) | |||
4455 | return false; | |||
4456 | ||||
4457 | // Calculate start position and width of the extract | |||
4458 | const int64_t Pos = ShrAmt - ShlAmt; | |||
4459 | const int64_t Width = Size - ShrAmt; | |||
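| // e.g. for s32, (ashr (shl x, 8), 16) sign-extends bits [8, 24) of x, | |||
| // i.e. G_SBFX x, Pos = 8, Width = 16. | |||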
4460 | ||||
4461 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4462 | auto WidthCst = B.buildConstant(ExtractTy, Width); | |||
4463 | auto PosCst = B.buildConstant(ExtractTy, Pos); | |||
4464 | B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst}); | |||
4465 | }; | |||
4466 | return true; | |||
4467 | } | |||
4468 | ||||
4469 | bool CombinerHelper::matchBitfieldExtractFromShrAnd( | |||
4470 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4471 | const unsigned Opcode = MI.getOpcode(); | |||
4472 | assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR); | |||
4473 | ||||
4474 | const Register Dst = MI.getOperand(0).getReg(); | |||
4475 | LLT Ty = MRI.getType(Dst); | |||
4476 | LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); | |||
4477 | if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal( | |||
4478 | TargetOpcode::G_UBFX, Ty, ExtractTy)) | |||
4479 | return false; | |||
4480 | ||||
4481 | // Try to match shr (and x, c1), c2 | |||
4482 | Register AndSrc; | |||
4483 | int64_t ShrAmt; | |||
4484 | int64_t SMask; | |||
4485 | if (!mi_match(Dst, MRI, | |||
4486 | m_BinOp(Opcode, | |||
4487 | m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))), | |||
4488 | m_ICst(ShrAmt)))) | |||
4489 | return false; | |||
4490 | ||||
4491 | const unsigned Size = Ty.getScalarSizeInBits(); | |||
4492 | if (ShrAmt < 0 || ShrAmt >= Size) | |||
4493 | return false; | |||
4494 | ||||
4495 | // If the shift subsumes the mask, emit the 0 directly. | |||
4496 | if (0 == (SMask >> ShrAmt)) { | |||
4497 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4498 | B.buildConstant(Dst, 0); | |||
4499 | }; | |||
4500 | return true; | |||
4501 | } | |||
4502 | ||||
4503 | // Check that ubfx can do the extraction, with no holes in the mask. | |||
4504 | uint64_t UMask = SMask; | |||
4505 | UMask |= maskTrailingOnes<uint64_t>(ShrAmt); | |||
4506 | UMask &= maskTrailingOnes<uint64_t>(Size); | |||
4507 | if (!isMask_64(UMask)) | |||
4508 | return false; | |||
4509 | ||||
4510 | // Calculate start position and width of the extract. | |||
4511 | const int64_t Pos = ShrAmt; | |||
4512 | const int64_t Width = llvm::countr_one(UMask) - ShrAmt; | |||
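| // e.g. (lshr (and x, 0x0FF0), 4) -> G_UBFX x, 4, 8: UMask is 0x0FFF, | |||
| // so Pos = 4 and Width = 12 - 4 = 8. | |||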
4513 | ||||
4514 | // It's preferable to keep the shift, rather than form G_SBFX. | |||
4515 | // TODO: remove the G_AND via demanded bits analysis. | |||
4516 | if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size) | |||
4517 | return false; | |||
4518 | ||||
4519 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4520 | auto WidthCst = B.buildConstant(ExtractTy, Width); | |||
4521 | auto PosCst = B.buildConstant(ExtractTy, Pos); | |||
4522 | B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst}); | |||
4523 | }; | |||
4524 | return true; | |||
4525 | } | |||
4526 | ||||
4527 | bool CombinerHelper::reassociationCanBreakAddressingModePattern( | |||
4528 | MachineInstr &PtrAdd) { | |||
4529 | assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD); | |||
4530 | ||||
4531 | Register Src1Reg = PtrAdd.getOperand(1).getReg(); | |||
4532 | MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI); | |||
4533 | if (!Src1Def) | |||
4534 | return false; | |||
4535 | ||||
4536 | Register Src2Reg = PtrAdd.getOperand(2).getReg(); | |||
4537 | ||||
4538 | if (MRI.hasOneNonDBGUse(Src1Reg)) | |||
4539 | return false; | |||
4540 | ||||
4541 | auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI); | |||
4542 | if (!C1) | |||
4543 | return false; | |||
4544 | auto C2 = getIConstantVRegVal(Src2Reg, MRI); | |||
4545 | if (!C2) | |||
4546 | return false; | |||
4547 | ||||
4548 | const APInt &C1APIntVal = *C1; | |||
4549 | const APInt &C2APIntVal = *C2; | |||
4550 | const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue(); | |||
4551 | ||||
4552 | for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) { | |||
4553 | // This combine may end up running before ptrtoint/inttoptr combines | |||
4554 | // manage to eliminate redundant conversions, so try to look through them. | |||
4555 | MachineInstr *ConvUseMI = &UseMI; | |||
4556 | unsigned ConvUseOpc = ConvUseMI->getOpcode(); | |||
4557 | while (ConvUseOpc == TargetOpcode::G_INTTOPTR || | |||
4558 | ConvUseOpc == TargetOpcode::G_PTRTOINT) { | |||
4559 | Register DefReg = ConvUseMI->getOperand(0).getReg(); | |||
4560 | if (!MRI.hasOneNonDBGUse(DefReg)) | |||
4561 | break; | |||
4562 | ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg); | |||
4563 | ConvUseOpc = ConvUseMI->getOpcode(); | |||
4564 | } | |||
4565 | auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD || | |||
4566 | ConvUseOpc == TargetOpcode::G_STORE; | |||
4567 | if (!LoadStore) | |||
4568 | continue; | |||
4569 | // Is x[offset2] already not a legal addressing mode? If so then | |||
4570 | // reassociating the constants breaks nothing (we test offset2 because | |||
4571 | // that's the one we hope to fold into the load or store). | |||
4572 | TargetLoweringBase::AddrMode AM; | |||
4573 | AM.HasBaseReg = true; | |||
4574 | AM.BaseOffs = C2APIntVal.getSExtValue(); | |||
4575 | unsigned AS = | |||
4576 | MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace(); | |||
4577 | Type *AccessTy = | |||
4578 | getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()), | |||
4579 | PtrAdd.getMF()->getFunction().getContext()); | |||
4580 | const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering(); | |||
4581 | if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM, | |||
4582 | AccessTy, AS)) | |||
4583 | continue; | |||
4584 | ||||
4585 | // Would x[offset1+offset2] still be a legal addressing mode? | |||
4586 | AM.BaseOffs = CombinedValue; | |||
4587 | if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM, | |||
4588 | AccessTy, AS)) | |||
4589 | return true; | |||
4590 | } | |||
4591 | ||||
4592 | return false; | |||
4593 | } | |||
4594 | ||||
4595 | bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI, | |||
4596 | MachineInstr *RHS, | |||
4597 | BuildFnTy &MatchInfo) { | |||
4598 | // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C) | |||
4599 | Register Src1Reg = MI.getOperand(1).getReg(); | |||
4600 | if (RHS->getOpcode() != TargetOpcode::G_ADD) | |||
4601 | return false; | |||
4602 | auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI); | |||
4603 | if (!C2) | |||
4604 | return false; | |||
4605 | ||||
4606 | MatchInfo = [=, &MI](MachineIRBuilder &B) { | |||
4607 | LLT PtrTy = MRI.getType(MI.getOperand(0).getReg()); | |||
4608 | ||||
4609 | auto NewBase = | |||
4610 | Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg()); | |||
4611 | Observer.changingInstr(MI); | |||
4612 | MI.getOperand(1).setReg(NewBase.getReg(0)); | |||
4613 | MI.getOperand(2).setReg(RHS->getOperand(2).getReg()); | |||
4614 | Observer.changedInstr(MI); | |||
4615 | }; | |||
4616 | return !reassociationCanBreakAddressingModePattern(MI); | |||
4617 | } | |||
4618 | ||||
4619 | bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI, | |||
4620 | MachineInstr *LHS, | |||
4621 | MachineInstr *RHS, | |||
4622 | BuildFnTy &MatchInfo) { | |||
4623 | // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C) | |||
4624 | // if and only if (G_PTR_ADD X, C) has one use. | |||
4625 | Register LHSBase; | |||
4626 | std::optional<ValueAndVReg> LHSCstOff; | |||
4627 | if (!mi_match(MI.getBaseReg(), MRI, | |||
4628 | m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff))))) | |||
4629 | return false; | |||
4630 | ||||
4631 | auto *LHSPtrAdd = cast<GPtrAdd>(LHS); | |||
4632 | MatchInfo = [=, &MI](MachineIRBuilder &B) { | |||
4633 | // When we change LHSPtrAdd's offset register we might cause it to use a reg | |||
4634 | // before its def. Sink the instruction before the outer PTR_ADD to | |||
4635 | // ensure this doesn't happen. | |||
4636 | LHSPtrAdd->moveBefore(&MI); | |||
4637 | Register RHSReg = MI.getOffsetReg(); | |||
4638 | // Build a fresh constant of RHSReg's type; directly reusing a vreg that | |||
| // came from an extend/trunc could cause a type mismatch. | |||
4639 | auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value); | |||
4640 | Observer.changingInstr(MI); | |||
4641 | MI.getOperand(2).setReg(NewCst.getReg(0)); | |||
4642 | Observer.changedInstr(MI); | |||
4643 | Observer.changingInstr(*LHSPtrAdd); | |||
4644 | LHSPtrAdd->getOperand(2).setReg(RHSReg); | |||
4645 | Observer.changedInstr(*LHSPtrAdd); | |||
4646 | }; | |||
4647 | return !reassociationCanBreakAddressingModePattern(MI); | |||
4648 | } | |||
4649 | ||||
4650 | bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI, | |||
4651 | MachineInstr *LHS, | |||
4652 | MachineInstr *RHS, | |||
4653 | BuildFnTy &MatchInfo) { | |||
4654 | // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2) | |||
4655 | auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS); | |||
4656 | if (!LHSPtrAdd) | |||
4657 | return false; | |||
4658 | ||||
4659 | Register Src2Reg = MI.getOperand(2).getReg(); | |||
4660 | Register LHSSrc1 = LHSPtrAdd->getBaseReg(); | |||
4661 | Register LHSSrc2 = LHSPtrAdd->getOffsetReg(); | |||
4662 | auto C1 = getIConstantVRegVal(LHSSrc2, MRI); | |||
4663 | if (!C1) | |||
4664 | return false; | |||
4665 | auto C2 = getIConstantVRegVal(Src2Reg, MRI); | |||
4666 | if (!C2) | |||
4667 | return false; | |||
4668 | ||||
4669 | MatchInfo = [=, &MI](MachineIRBuilder &B) { | |||
4670 | auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2); | |||
4671 | Observer.changingInstr(MI); | |||
4672 | MI.getOperand(1).setReg(LHSSrc1); | |||
4673 | MI.getOperand(2).setReg(NewCst.getReg(0)); | |||
4674 | Observer.changedInstr(MI); | |||
4675 | }; | |||
4676 | return !reassociationCanBreakAddressingModePattern(MI); | |||
4677 | } | |||
4678 | ||||
4679 | bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI, | |||
4680 | BuildFnTy &MatchInfo) { | |||
4681 | auto &PtrAdd = cast<GPtrAdd>(MI); | |||
4682 | // We're trying to match a few pointer computation patterns here for | |||
4683 | // re-association opportunities. | |||
4684 | // 1) Isolating a constant operand to be on the RHS, e.g.: | |||
4685 | // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C) | |||
4686 | // | |||
4687 | // 2) Folding two constants in each sub-tree as long as such folding | |||
4688 | // doesn't break a legal addressing mode. | |||
4689 | // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2) | |||
4690 | // | |||
4691 | // 3) Move a constant from the LHS of an inner op to the RHS of the outer. | |||
4692 | // G_PTR_ADD (G_PTR_ADD X, C), Y) -> G_PTR_ADD (G_PTR_ADD(X, Y), C) | |||
4693 | // iff (G_PTR_ADD X, C) has one use. | |||
4694 | MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg()); | |||
4695 | MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg()); | |||
4696 | ||||
4697 | // Try to match example 2. | |||
4698 | if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo)) | |||
4699 | return true; | |||
4700 | ||||
4701 | // Try to match example 3. | |||
4702 | if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo)) | |||
4703 | return true; | |||
4704 | ||||
4705 | // Try to match example 1. | |||
4706 | if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo)) | |||
4707 | return true; | |||
4708 | ||||
4709 | return false; | |||
4710 | } | |||
4711 | ||||
4712 | bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) { | |||
4713 | Register Op1 = MI.getOperand(1).getReg(); | |||
4714 | Register Op2 = MI.getOperand(2).getReg(); | |||
4715 | auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI); | |||
4716 | if (!MaybeCst) | |||
4717 | return false; | |||
4718 | MatchInfo = *MaybeCst; | |||
4719 | return true; | |||
4720 | } | |||
4721 | ||||
4722 | bool CombinerHelper::matchNarrowBinopFeedingAnd( | |||
4723 | MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { | |||
4724 | // Look for a binop feeding into an AND with a mask: | |||
4725 | // | |||
4726 | // %add = G_ADD %lhs, %rhs | |||
4727 | // %and = G_AND %add, 000...11111111 | |||
4728 | // | |||
4729 | // Check if it's possible to perform the binop at a narrower width and zext | |||
4730 | // back to the original width like so: | |||
4731 | // | |||
4732 | // %narrow_lhs = G_TRUNC %lhs | |||
4733 | // %narrow_rhs = G_TRUNC %rhs | |||
4734 | // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs | |||
4735 | // %new_add = G_ZEXT %narrow_add | |||
4736 | // %and = G_AND %new_add, 000...11111111 | |||
4737 | // | |||
4738 | // This can allow later combines to eliminate the G_AND if it turns out | |||
4739 | // that the mask is irrelevant. | |||
4740 | assert(MI.getOpcode() == TargetOpcode::G_AND); | |||
4741 | Register Dst = MI.getOperand(0).getReg(); | |||
4742 | Register AndLHS = MI.getOperand(1).getReg(); | |||
4743 | Register AndRHS = MI.getOperand(2).getReg(); | |||
4744 | LLT WideTy = MRI.getType(Dst); | |||
4745 | ||||
4746 | // If the potential binop has more than one use, then it's possible that one | |||
4747 | // of those uses will need its full width. | |||
4748 | if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS)) | |||
4749 | return false; | |||
4750 | ||||
4751 | // Check if the LHS feeding the AND is impacted by the high bits that we're | |||
4752 | // masking out. | |||
4753 | // | |||
4754 | // e.g. for 64-bit x, y: | |||
4755 | // | |||
4756 | // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535 | |||
4757 | MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI); | |||
4758 | if (!LHSInst) | |||
4759 | return false; | |||
4760 | unsigned LHSOpc = LHSInst->getOpcode(); | |||
4761 | switch (LHSOpc) { | |||
4762 | default: | |||
4763 | return false; | |||
4764 | case TargetOpcode::G_ADD: | |||
4765 | case TargetOpcode::G_SUB: | |||
4766 | case TargetOpcode::G_MUL: | |||
4767 | case TargetOpcode::G_AND: | |||
4768 | case TargetOpcode::G_OR: | |||
4769 | case TargetOpcode::G_XOR: | |||
4770 | break; | |||
4771 | } | |||
4772 | ||||
4773 | // Find the mask on the RHS. | |||
4774 | auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI); | |||
4775 | if (!Cst) | |||
4776 | return false; | |||
4777 | auto Mask = Cst->Value; | |||
4778 | if (!Mask.isMask()) | |||
4779 | return false; | |||
4780 | ||||
4781 | // No point in combining if there's nothing to truncate. | |||
4782 | unsigned NarrowWidth = Mask.countr_one(); | |||
4783 | if (NarrowWidth == WideTy.getSizeInBits()) | |||
4784 | return false; | |||
4785 | LLT NarrowTy = LLT::scalar(NarrowWidth); | |||
4786 | ||||
4787 | // Check if adding the zext + truncates could be harmful. | |||
4788 | auto &MF = *MI.getMF(); | |||
4789 | const auto &TLI = getTargetLowering(); | |||
4790 | LLVMContext &Ctx = MF.getFunction().getContext(); | |||
4791 | auto &DL = MF.getDataLayout(); | |||
4792 | if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) || | |||
4793 | !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx)) | |||
4794 | return false; | |||
4795 | if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) || | |||
4796 | !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}})) | |||
4797 | return false; | |||
4798 | Register BinOpLHS = LHSInst->getOperand(1).getReg(); | |||
4799 | Register BinOpRHS = LHSInst->getOperand(2).getReg(); | |||
4800 | MatchInfo = [=, &MI](MachineIRBuilder &B) { | |||
4801 | auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS); | |||
4802 | auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS); | |||
4803 | auto NarrowBinOp = | |||
4804 | Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS}); | |||
4805 | auto Ext = Builder.buildZExt(WideTy, NarrowBinOp); | |||
4806 | Observer.changingInstr(MI); | |||
4807 | MI.getOperand(1).setReg(Ext.getReg(0)); | |||
4808 | Observer.changedInstr(MI); | |||
4809 | }; | |||
4810 | return true; | |||
4811 | } | |||
4812 | ||||
4813 | bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) { | |||
4814 | unsigned Opc = MI.getOpcode(); | |||
4815 | assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO); | |||
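| // (G_*MULO x, 2) -> (G_*ADDO x, x): x * 2 and x + x are the same | |||
| // operation, so the overflow flag carries over to the add form. | |||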
4816 | ||||
4817 | if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2))) | |||
4818 | return false; | |||
4819 | ||||
4820 | MatchInfo = [=, &MI](MachineIRBuilder &B) { | |||
4821 | Observer.changingInstr(MI); | |||
4822 | unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO | |||
4823 | : TargetOpcode::G_SADDO; | |||
4824 | MI.setDesc(Builder.getTII().get(NewOpc)); | |||
4825 | MI.getOperand(3).setReg(MI.getOperand(2).getReg()); | |||
4826 | Observer.changedInstr(MI); | |||
4827 | }; | |||
4828 | return true; | |||
4829 | } | |||
4830 | ||||
4831 | bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) { | |||
4832 | // (G_*MULO x, 0) -> 0 + no carry out | |||
4833 | assert(MI.getOpcode() == TargetOpcode::G_UMULO || | |||
4834 | MI.getOpcode() == TargetOpcode::G_SMULO); | |||
4835 | if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0))) | |||
4836 | return false; | |||
4837 | Register Dst = MI.getOperand(0).getReg(); | |||
4838 | Register Carry = MI.getOperand(1).getReg(); | |||
4839 | if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) || | |||
4840 | !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry))) | |||
4841 | return false; | |||
4842 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4843 | B.buildConstant(Dst, 0); | |||
4844 | B.buildConstant(Carry, 0); | |||
4845 | }; | |||
4846 | return true; | |||
4847 | } | |||
4848 | ||||
4849 | bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) { | |||
4850 | // (G_*ADDO x, 0) -> x + no carry out | |||
4851 | assert(MI.getOpcode() == TargetOpcode::G_UADDO || | |||
4852 | MI.getOpcode() == TargetOpcode::G_SADDO); | |||
4853 | if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0))) | |||
4854 | return false; | |||
4855 | Register Carry = MI.getOperand(1).getReg(); | |||
4856 | if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry))) | |||
4857 | return false; | |||
4858 | Register Dst = MI.getOperand(0).getReg(); | |||
4859 | Register LHS = MI.getOperand(2).getReg(); | |||
4860 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4861 | B.buildCopy(Dst, LHS); | |||
4862 | B.buildConstant(Carry, 0); | |||
4863 | }; | |||
4864 | return true; | |||
4865 | } | |||
4866 | ||||
4867 | bool CombinerHelper::matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) { | |||
4868 | // (G_*ADDE x, y, 0) -> (G_*ADDO x, y) | |||
4869 | // (G_*SUBE x, y, 0) -> (G_*SUBO x, y) | |||
4870 | assert(MI.getOpcode() == TargetOpcode::G_UADDE || | |||
4871 | MI.getOpcode() == TargetOpcode::G_SADDE || | |||
4872 | MI.getOpcode() == TargetOpcode::G_USUBE || | |||
4873 | MI.getOpcode() == TargetOpcode::G_SSUBE); | |||
4874 | if (!mi_match(MI.getOperand(4).getReg(), MRI, m_SpecificICstOrSplat(0))) | |||
4875 | return false; | |||
4876 | MatchInfo = [&](MachineIRBuilder &B) { | |||
4877 | unsigned NewOpcode; | |||
4878 | switch (MI.getOpcode()) { | |||
4879 | case TargetOpcode::G_UADDE: | |||
4880 | NewOpcode = TargetOpcode::G_UADDO; | |||
4881 | break; | |||
4882 | case TargetOpcode::G_SADDE: | |||
4883 | NewOpcode = TargetOpcode::G_SADDO; | |||
4884 | break; | |||
4885 | case TargetOpcode::G_USUBE: | |||
4886 | NewOpcode = TargetOpcode::G_USUBO; | |||
4887 | break; | |||
4888 | case TargetOpcode::G_SSUBE: | |||
4889 | NewOpcode = TargetOpcode::G_SSUBO; | |||
4890 | break; | |||
| default: | |||
| // The assert above restricts the opcode to the four cases handled here; | |||
| // an explicit default also keeps NewOpcode from reaching the get() call | |||
| // below uninitialized. | |||
| llvm_unreachable("Unexpected G_*ADDE/G_*SUBE opcode"); | |||
4891 | } | |||
4892 | Observer.changingInstr(MI); | |||
4893 | MI.setDesc(B.getTII().get(NewOpcode)); | |||
4894 | MI.removeOperand(4); | |||
4895 | Observer.changedInstr(MI); | |||
4896 | }; | |||
4897 | return true; | |||
4898 | } | |||
4899 | ||||
4900 | bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI, | |||
4901 | BuildFnTy &MatchInfo) { | |||
4902 | assert(MI.getOpcode() == TargetOpcode::G_SUB); | |||
4903 | Register Dst = MI.getOperand(0).getReg(); | |||
4904 | // (x + y) - z -> x (if y == z) | |||
4905 | // (x + y) - z -> y (if x == z) | |||
4906 | Register X, Y, Z; | |||
4907 | if (mi_match(Dst, MRI, m_GSub(m_GAdd(m_Reg(X), m_Reg(Y)), m_Reg(Z)))) { | |||
4908 | Register ReplaceReg; | |||
4909 | int64_t CstX, CstY; | |||
4910 | if (Y == Z || (mi_match(Y, MRI, m_ICstOrSplat(CstY)) && | |||
4911 | mi_match(Z, MRI, m_SpecificICstOrSplat(CstY)))) | |||
4912 | ReplaceReg = X; | |||
4913 | else if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) && | |||
4914 | mi_match(Z, MRI, m_SpecificICstOrSplat(CstX)))) | |||
4915 | ReplaceReg = Y; | |||
4916 | if (ReplaceReg) { | |||
4917 | MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, ReplaceReg); }; | |||
4918 | return true; | |||
4919 | } | |||
4920 | } | |||
4921 | ||||
4922 | // x - (y + z) -> 0 - y (if x == z) | |||
4923 | // x - (y + z) -> 0 - z (if x == y) | |||
4924 | if (mi_match(Dst, MRI, m_GSub(m_Reg(X), m_GAdd(m_Reg(Y), m_Reg(Z))))) { | |||
4925 | Register ReplaceReg; | |||
4926 | int64_t CstX; | |||
4927 | if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) && | |||
4928 | mi_match(Z, MRI, m_SpecificICstOrSplat(CstX)))) | |||
4929 | ReplaceReg = Y; | |||
4930 | else if (X == Y || (mi_match(X, MRI, m_ICstOrSplat(CstX)) && | |||
4931 | mi_match(Y, MRI, m_SpecificICstOrSplat(CstX)))) | |||
4932 | ReplaceReg = Z; | |||
4933 | if (ReplaceReg) { | |||
4934 | MatchInfo = [=](MachineIRBuilder &B) { | |||
4935 | auto Zero = B.buildConstant(MRI.getType(Dst), 0); | |||
4936 | B.buildSub(Dst, Zero, ReplaceReg); | |||
4937 | }; | |||
4938 | return true; | |||
4939 | } | |||
4940 | } | |||
4941 | return false; | |||
4942 | } | |||
4943 | ||||
4944 | MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) { | |||
4945 | assert(MI.getOpcode() == TargetOpcode::G_UDIV); | |||
4946 | auto &UDiv = cast<GenericMachineInstr>(MI); | |||
4947 | Register Dst = UDiv.getReg(0); | |||
4948 | Register LHS = UDiv.getReg(1); | |||
4949 | Register RHS = UDiv.getReg(2); | |||
4950 | LLT Ty = MRI.getType(Dst); | |||
4951 | LLT ScalarTy = Ty.getScalarType(); | |||
4952 | const unsigned EltBits = ScalarTy.getScalarSizeInBits(); | |||
4953 | LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); | |||
4954 | LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType(); | |||
4955 | auto &MIB = Builder; | |||
4956 | MIB.setInstrAndDebugLoc(MI); | |||
4957 | ||||
4958 | bool UseNPQ = false; | |||
4959 | SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; | |||
4960 | ||||
4961 | auto BuildUDIVPattern = [&](const Constant *C) { | |||
4962 | auto *CI = cast<ConstantInt>(C); | |||
4963 | const APInt &Divisor = CI->getValue(); | |||
4964 | ||||
4965 | bool SelNPQ = false; | |||
4966 | APInt Magic(Divisor.getBitWidth(), 0); | |||
4967 | unsigned PreShift = 0, PostShift = 0; | |||
4968 | ||||
4969 | // Magic algorithm doesn't work for division by 1. We need to emit a select | |||
4970 | // at the end. | |||
4971 | // TODO: Use undef values for divisor of 1. | |||
4972 | if (!Divisor.isOne()) { | |||
4973 | UnsignedDivisionByConstantInfo magics = | |||
4974 | UnsignedDivisionByConstantInfo::get(Divisor); | |||
4975 | ||||
4976 | Magic = std::move(magics.Magic); | |||
4977 | ||||
4978 | assert(magics.PreShift < Divisor.getBitWidth() &&(static_cast <bool> (magics.PreShift < Divisor.getBitWidth () && "We shouldn't generate an undefined shift!") ? void (0) : __assert_fail ("magics.PreShift < Divisor.getBitWidth() && \"We shouldn't generate an undefined shift!\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 4979, __extension__ __PRETTY_FUNCTION__)) | |||
4979 | "We shouldn't generate an undefined shift!")(static_cast <bool> (magics.PreShift < Divisor.getBitWidth () && "We shouldn't generate an undefined shift!") ? void (0) : __assert_fail ("magics.PreShift < Divisor.getBitWidth() && \"We shouldn't generate an undefined shift!\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 4979, __extension__ __PRETTY_FUNCTION__)); | |||
4980 | assert(magics.PostShift < Divisor.getBitWidth() &&(static_cast <bool> (magics.PostShift < Divisor.getBitWidth () && "We shouldn't generate an undefined shift!") ? void (0) : __assert_fail ("magics.PostShift < Divisor.getBitWidth() && \"We shouldn't generate an undefined shift!\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 4981, __extension__ __PRETTY_FUNCTION__)) | |||
4981 | "We shouldn't generate an undefined shift!")(static_cast <bool> (magics.PostShift < Divisor.getBitWidth () && "We shouldn't generate an undefined shift!") ? void (0) : __assert_fail ("magics.PostShift < Divisor.getBitWidth() && \"We shouldn't generate an undefined shift!\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 4981, __extension__ __PRETTY_FUNCTION__)); | |||
4982 | assert((!magics.IsAdd || magics.PreShift == 0) && "Unexpected pre-shift")(static_cast <bool> ((!magics.IsAdd || magics.PreShift == 0) && "Unexpected pre-shift") ? void (0) : __assert_fail ("(!magics.IsAdd || magics.PreShift == 0) && \"Unexpected pre-shift\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 4982, __extension__ __PRETTY_FUNCTION__)); | |||
4983 | PreShift = magics.PreShift; | |||
4984 | PostShift = magics.PostShift; | |||
4985 | SelNPQ = magics.IsAdd; | |||
4986 | } | |||
4987 | ||||
4988 | PreShifts.push_back( | |||
4989 | MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0)); | |||
4990 | MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0)); | |||
4991 | NPQFactors.push_back( | |||
4992 | MIB.buildConstant(ScalarTy, | |||
4993 | SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) | |||
4994 | : APInt::getZero(EltBits)) | |||
4995 | .getReg(0)); | |||
4996 | PostShifts.push_back( | |||
4997 | MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0)); | |||
4998 | UseNPQ |= SelNPQ; | |||
4999 | return true; | |||
5000 | }; | |||
5001 | ||||
5002 | // Collect the shifts/magic values from each element. | |||
5003 | bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern); | |||
5004 | (void)Matched; | |||
5005 | assert(Matched && "Expected unary predicate match to succeed")(static_cast <bool> (Matched && "Expected unary predicate match to succeed" ) ? void (0) : __assert_fail ("Matched && \"Expected unary predicate match to succeed\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 5005, __extension__ __PRETTY_FUNCTION__)); | |||
5006 | ||||
5007 | Register PreShift, PostShift, MagicFactor, NPQFactor; | |||
5008 | auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI); | |||
5009 | if (RHSDef) { | |||
5010 | PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0); | |||
5011 | MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0); | |||
5012 | NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0); | |||
5013 | PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0); | |||
5014 | } else { | |||
5015 | assert(MRI.getType(RHS).isScalar() &&(static_cast <bool> (MRI.getType(RHS).isScalar() && "Non-build_vector operation should have been a scalar") ? void (0) : __assert_fail ("MRI.getType(RHS).isScalar() && \"Non-build_vector operation should have been a scalar\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 5016, __extension__ __PRETTY_FUNCTION__)) | |||
5016 | "Non-build_vector operation should have been a scalar")(static_cast <bool> (MRI.getType(RHS).isScalar() && "Non-build_vector operation should have been a scalar") ? void (0) : __assert_fail ("MRI.getType(RHS).isScalar() && \"Non-build_vector operation should have been a scalar\"" , "llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp", 5016, __extension__ __PRETTY_FUNCTION__)); | |||
5017 | PreShift = PreShifts[0]; | |||
5018 | MagicFactor = MagicFactors[0]; | |||
5019 | PostShift = PostShifts[0]; | |||
5020 | } | |||
5021 | ||||
5022 | Register Q = LHS; | |||
5023 | Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0); | |||
5024 | ||||
5025 | // Multiply the numerator (operand 0) by the magic value. | |||
5026 | Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0); | |||
5027 | ||||
5028 | if (UseNPQ) { | |||
5029 | Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0); | |||
5030 | ||||
5031 | // For vectors we might have a mix of non-NPQ/NPQ paths, so use | |||
5032 | // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero. | |||
5033 | if (Ty.isVector()) | |||
5034 | NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0); | |||
5035 | else | |||
5036 | NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0); | |||
5037 | ||||
5038 | Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0); | |||
5039 | } | |||
5040 | ||||
5041 | Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0); | |||
5042 | auto One = MIB.buildConstant(Ty, 1); | |||
5043 | auto IsOne = MIB.buildICmp( | |||
5044 | CmpInst::Predicate::ICMP_EQ, | |||
5045 | Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One); | |||
5046 | return MIB.buildSelect(Ty, IsOne, LHS, Q); | |||
5047 | } | |||
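
// Worked sketch of the expansion above for a 32-bit G_UDIV by 7 (constants
// follow the standard magic-number algorithm; register names are
// hypothetical): Magic = 0x24924925, PreShift = 0, PostShift = 2 and
// IsAdd = true, so the NPQ fixup path is taken:
//   %q   = G_UMULH %n, 0x24924925
//   %npq = G_SUB %n, %q
//   %npq = G_LSHR %npq, 1
//   %q   = G_ADD %npq, %q
//   %q   = G_LSHR %q, 2    ; == %n udiv 7
// The trailing compare/select against a divisor of 1 then simply picks %q.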

bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UDIV);
  Register Dst = MI.getOperand(0).getReg();
  Register RHS = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(Dst);
  auto *RHSDef = MRI.getVRegDef(RHS);
  if (!isConstantOrConstantVector(*RHSDef, MRI))
    return false;

  auto &MF = *MI.getMF();
  AttributeList Attr = MF.getFunction().getAttributes();
  const auto &TLI = getTargetLowering();
  LLVMContext &Ctx = MF.getFunction().getContext();
  auto &DL = MF.getDataLayout();
  if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
    return false;

  // Don't do this for minsize because the instruction sequence is usually
  // larger.
  if (MF.getFunction().hasMinSize())
    return false;

  // Don't do this if the types are not going to be legal.
  if (LI) {
    if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
      return false;
    if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}}))
      return false;
    if (!isLegalOrBeforeLegalizer(
            {TargetOpcode::G_ICMP,
             {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
              DstTy}}))
      return false;
  }

  auto CheckEltValue = [&](const Constant *C) {
    if (auto *CI = dyn_cast_or_null<ConstantInt>(C))
      return !CI->isZero();
    return false;
  };
  return matchUnaryPredicate(MRI, RHS, CheckEltValue);
}

void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
  auto *NewMI = buildUDivUsingMul(MI);
  replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}

bool CombinerHelper::matchSDivByConst(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  Register Dst = MI.getOperand(0).getReg();
  Register RHS = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(Dst);

  auto &MF = *MI.getMF();
  AttributeList Attr = MF.getFunction().getAttributes();
  const auto &TLI = getTargetLowering();
  LLVMContext &Ctx = MF.getFunction().getContext();
  auto &DL = MF.getDataLayout();
  if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
    return false;

  // Don't do this for minsize because the instruction sequence is usually
  // larger.
  if (MF.getFunction().hasMinSize())
    return false;

  // If the sdiv has an 'exact' flag we can use a simpler lowering.
  if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
    return matchUnaryPredicate(
        MRI, RHS, [](const Constant *C) { return C && !C->isZeroValue(); });
  }

  // Don't support the general case for now.
  return false;
}

void CombinerHelper::applySDivByConst(MachineInstr &MI) {
  auto *NewMI = buildSDivUsingMul(MI);
  replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}

MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  auto &SDiv = cast<GenericMachineInstr>(MI);
  Register Dst = SDiv.getReg(0);
  Register LHS = SDiv.getReg(1);
  Register RHS = SDiv.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ScalarTy = Ty.getScalarType();
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
  auto &MIB = Builder;
  MIB.setInstrAndDebugLoc(MI);

  bool UseSRA = false;
  SmallVector<Register, 16> Shifts, Factors;

  auto *RHSDef = cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI));
  bool IsSplat = getIConstantSplatVal(*RHSDef, MRI).has_value();

  auto BuildSDIVPattern = [&](const Constant *C) {
    // Don't recompute inverses for each splat element.
    if (IsSplat && !Factors.empty()) {
      Shifts.push_back(Shifts[0]);
      Factors.push_back(Factors[0]);
      return true;
    }

    auto *CI = cast<ConstantInt>(C);
    APInt Divisor = CI->getValue();
    unsigned Shift = Divisor.countr_zero();
    if (Shift) {
      Divisor.ashrInPlace(Shift);
      UseSRA = true;
    }

    // Calculate the multiplicative inverse modulo BW.
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = Divisor.getBitWidth();
    APInt Factor = Divisor.zext(W + 1)
                       .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                       .trunc(W);
    Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
    Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
    return true;
  };

  // Collect all magic values from the build vector.
  bool Matched = matchUnaryPredicate(MRI, RHS, BuildSDIVPattern);
  (void)Matched;
  assert(Matched && "Expected unary predicate match to succeed");

  Register Shift, Factor;
  if (Ty.isVector()) {
    Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
    Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
  } else {
    Shift = Shifts[0];
    Factor = Factors[0];
  }

  Register Res = LHS;

  if (UseSRA)
    Res = MIB.buildAShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0);

  return MIB.buildMul(Ty, Res, Factor);
}
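
// Worked sketch of the exact-division path (constants follow the standard
// multiplicative-inverse construction; register names are hypothetical): for
// a 32-bit exact G_SDIV by 6, countr_zero(6) == 1, so the divisor is first
// reduced to 3 by an exact arithmetic shift, and the result is multiplied by
// the inverse of 3 modulo 2^32, i.e. 0xAAAAAAAB (3 * 0xAAAAAAAB == 1 mod 2^32):
//   %t   = exact G_ASHR %x, 1
//   %res = G_MUL %t, 0xAAAAAAAB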

bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UMULH);
  Register RHS = MI.getOperand(2).getReg();
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  auto MatchPow2ExceptOne = [&](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
    return false;
  };
  if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false))
    return false;
  return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}});
}

void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  unsigned NumEltBits = Ty.getScalarSizeInBits();

  Builder.setInstrAndDebugLoc(MI);
  auto LogBase2 = buildLogBase2(RHS, Builder);
  auto ShiftAmt =
      Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
  auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
  Builder.buildLShr(Dst, LHS, Trunc);
  MI.eraseFromParent();
}
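
// For example (illustrative values): with 32-bit elements, G_UMULH %x, 8
// yields the high half of the 64-bit product %x * 2^3, i.e. %x >> (32 - 3),
// so the combine emits G_LSHR %x, 29.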

bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
                                               BuildFnTy &MatchInfo) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
         Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
         Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);

  Register Dst = MI.getOperand(0).getReg();
  Register X = MI.getOperand(1).getReg();
  Register Y = MI.getOperand(2).getReg();
  LLT Type = MRI.getType(Dst);

  // fold (fadd x, fneg(y)) -> (fsub x, y)
  // fold (fadd fneg(y), x) -> (fsub x, y)
  // G_FADD is commutative, so both cases are checked by m_GFAdd.
  if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
      isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
    Opc = TargetOpcode::G_FSUB;
  }
  // fold (fsub x, fneg(y)) -> (fadd x, y)
  else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
           isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
    Opc = TargetOpcode::G_FADD;
  }
  // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
  // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
  // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
  else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
            Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
           mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
           mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
    // No opcode change.
  } else
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    MI.setDesc(B.getTII().get(Opc));
    MI.getOperand(1).setReg(X);
    MI.getOperand(2).setReg(Y);
    Observer.changedInstr(MI);
  };
  return true;
}

bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  Register LHS = MI.getOperand(1).getReg();
  MatchInfo = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  const auto LHSCst = Ty.isVector()
                          ? getFConstantSplat(LHS, MRI, /* allowUndef */ true)
                          : getFConstantVRegValWithLookThrough(LHS, MRI);
  if (!LHSCst)
    return false;

  // -0.0 is always allowed.
  if (LHSCst->Value.isNegZero())
    return true;

  // +0.0 is only allowed if nsz is set.
  if (LHSCst->Value.isPosZero())
    return MI.getFlag(MachineInstr::FmNsz);

  return false;
}
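
// Why +0.0 needs nsz (illustrative): in IEEE-754 round-to-nearest,
// G_FSUB +0.0, +0.0 must produce +0.0, while G_FNEG +0.0 produces -0.0, so
// (fsub +0.0, x) -> (fneg x) is only sound when signed zeros may be ignored.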

void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  Register Dst = MI.getOperand(0).getReg();
  Builder.buildFNeg(
      Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
  eraseInst(MI);
}

/// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
/// due to global flags or MachineInstr flags.
static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
  if (MI.getOpcode() != TargetOpcode::G_FMUL)
    return false;
  return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract);
}

static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
                        const MachineRegisterInfo &MRI) {
  return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()),
                       MRI.use_instr_nodbg_end()) >
         std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()),
                       MRI.use_instr_nodbg_end());
}

bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
                                         bool &AllowFusionGlobally,
                                         bool &HasFMAD, bool &Aggressive,
                                         bool CanReassociate) {
  auto *MF = MI.getMF();
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  const TargetOptions &Options = MF->getTarget().Options;
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());

  if (CanReassociate &&
      !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc)))
    return false;

  // Floating-point multiply-add with intermediate rounding.
  HasFMAD = (!isPreLegalize() && TLI.isFMADLegal(MI, DstType));
  // Floating-point multiply-add without intermediate rounding.
  bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
                isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
  // No valid opcode, do not combine.
  if (!HasFMAD && !HasFMA)
    return false;

  AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
                        Options.UnsafeFPMath || HasFMAD;
  // If the addition is not contractable, do not combine.
  if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
    return false;

  Aggressive = TLI.enableAggressiveFMAFusion(DstType);
  return true;
}
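
// Gating example (illustrative): a G_FADD carrying the 'contract' fast-math
// flag can fuse with a contractable G_FMUL even when AllowFPOpFusion is not
// Fast; with neither a global nor a per-instruction 'contract' permission,
// the check above rejects the combine.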

bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // fold (fadd (fmul x, y), z) -> (fma x, y, z)
  if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), RHS.Reg});
    };
    return true;
  }

  // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
  if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {RHS.MI->getOperand(1).getReg(),
                    RHS.MI->getOperand(2).getReg(), LHS.Reg});
    };
    return true;
  }

  return false;
}
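
// Illustrative fold (hypothetical generic MIR):
//   %m = G_FMUL %x, %y
//   %d = G_FADD %m, %z
// becomes, when %m has a single use (or fusion is aggressive):
//   %d = G_FMA %x, %y, %z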

bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  MachineInstr *FpExtSrc;
  if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
      isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
      auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
    };
    return true;
  }

  // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
  // Note: Commutes FADD operands.
  if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
      isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
      auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
    };
    return true;
  }

  return false;
}

bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  MachineInstr *FMA = nullptr;
  Register Z;
  // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
       TargetOpcode::G_FMUL) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
    FMA = LHS.MI;
    Z = RHS.Reg;
  }
  // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
  else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
           (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
            TargetOpcode::G_FMUL) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
    Z = LHS.Reg;
    FMA = RHS.MI;
  }

  if (FMA) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg());
    Register X = FMA->getOperand(1).getReg();
    Register Y = FMA->getOperand(2).getReg();
    Register U = FMulMI->getOperand(1).getReg();
    Register V = FMulMI->getOperand(2).getReg();

    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
      B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {X, Y, InnerFMA});
    };
    return true;
  }

  return false;
}
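
// Illustrative shape of the rewrite above (requires reassociation to be
// permitted, per the CanReassociate check in canCombineFMadOrFMA):
//   fadd (fma x, y, (fmul u, v)), z
// -> fma x, y, (fma u, v, z)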

bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  if (!Aggressive)
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // Builds: (fma x, y, (fma (fpext u), (fpext v), z)).
  auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
                                 Register Y, MachineIRBuilder &B) {
    Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
    Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
    Register InnerFMA =
        B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
            .getReg(0);
    B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                 {X, Y, InnerFMA});
  };

  MachineInstr *FMulMI, *FMAMI;
  // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
  // -> (fma x, y, (fma (fpext u), (fpext v), z))
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      mi_match(LHS.MI->getOperand(3).getReg(), MRI,
               m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=](MachineIRBuilder &B) {
      buildMatchInfo(FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), RHS.Reg,
                     LHS.MI->getOperand(1).getReg(),
                     LHS.MI->getOperand(2).getReg(), B);
    };
    return true;
  }

  // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
  // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
  // FIXME: This turns two single-precision and one double-precision
  // operation into two double-precision operations, which might not be
  // interesting for all targets, especially GPUs.
  if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
    if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            MRI.getType(FMAMI->getOperand(0).getReg()))) {
      MatchInfo = [=](MachineIRBuilder &B) {
        Register X = FMAMI->getOperand(1).getReg();
        Register Y = FMAMI->getOperand(2).getReg();
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
        buildMatchInfo(FMulMI->getOperand(1).getReg(),
                       FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
      };

      return true;
    }
  }

  // fold (fadd z, (fma x, y, (fpext (fmul u, v))))
  // -> (fma x, y, (fma (fpext u), (fpext v), z))
  if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
      mi_match(RHS.MI->getOperand(3).getReg(), MRI,
               m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=](MachineIRBuilder &B) {
      buildMatchInfo(FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), LHS.Reg,
                     RHS.MI->getOperand(1).getReg(),
                     RHS.MI->getOperand(2).getReg(), B);
    };
    return true;
  }

  // fold (fadd z, (fpext (fma x, y, (fmul u, v))))
  // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
  // FIXME: This turns two single-precision and one double-precision
  // operation into two double-precision operations, which might not be
  // interesting for all targets, especially GPUs.
  if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
    if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            MRI.getType(FMAMI->getOperand(0).getReg()))) {
      MatchInfo = [=](MachineIRBuilder &B) {
        Register X = FMAMI->getOperand(1).getReg();
        Register Y = FMAMI->getOperand(2).getReg();
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
        buildMatchInfo(FMulMI->getOperand(1).getReg(),
                       FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
      };
      return true;
    }
  }

  return false;
}

bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  // If we have two choices trying to fold (fsub (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  bool FirstMulHasFewerUses = true;
  if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
      hasMoreUses(*LHS.MI, *RHS.MI, MRI))
    FirstMulHasFewerUses = false;

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
  if (FirstMulHasFewerUses &&
      (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
       (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }
  // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
  else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
            (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegY =
          B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
    };
    return true;
  }

  return false;
}

bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegX =
          B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegX, FMulMI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }

  // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
  if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},