LLVM 23.0.0git
CombinerHelper.h
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===--------------------------------------------------------------------===//
8/// \file
9/// This contains common combine transformations that may be used in a combine
10/// pass,or by the target elsewhere.
11/// pass, or by the target elsewhere.
12/// Targets can pick individual opcode transformations from the helper or use
12/// tryCombine which invokes all transformations. All of the transformations
13/// return true if the MachineInstruction changed and false otherwise.
14///
15//===--------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
18#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
19
20#include "llvm/ADT/DenseMap.h"
26#include "llvm/IR/InstrTypes.h"
27#include <functional>
28
29namespace llvm {
30
32class APInt;
33class ConstantFP;
34class GPtrAdd;
35class GZExtLoad;
39class MachineInstr;
40class MachineOperand;
43class LegalizerInfo;
44struct LegalityQuery;
45class RegisterBank;
47class TargetLowering;
49
51 LLT Ty; // The result type of the extend.
52 unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
54};
55
60 bool RematOffset = false; // True if Offset is a constant that needs to be
61 // rematerialized before the new load/store.
62 bool IsPre = false;
63};
64
66 int64_t Imm;
69 unsigned Flags;
70};
71
74 int64_t Imm;
75};
76
83
92
93using BuildFnTy = std::function<void(MachineIRBuilder &)>;
94
96 SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
98 unsigned Opcode = 0; /// The opcode for the produced instruction.
99 OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
103};
104
106 /// Describes instructions to be built during a combine.
110 std::initializer_list<InstructionBuildSteps> InstrsToBuild)
112};
113
115protected:
125
126public:
128 bool IsPreLegalize, GISelValueTracking *VT = nullptr,
129 MachineDominatorTree *MDT = nullptr,
130 const LegalizerInfo *LI = nullptr);
131
133
135 return Builder;
136 }
137
138 const TargetLowering &getTargetLowering() const;
139
140 const MachineFunction &getMachineFunction() const;
141
142 const DataLayout &getDataLayout() const;
143
144 LLVMContext &getContext() const;
145
146 /// \returns true if the combiner is running pre-legalization.
147 bool isPreLegalize() const;
148
149 /// \returns true if \p Query is legal on the target.
150 bool isLegal(const LegalityQuery &Query) const;
151
152 /// \return true if the combine is running prior to legalization, or if \p
153 /// Query is legal on the target.
154 bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
155
156 /// \return true if \p Query is legal on the target, or if \p Query will
157 /// perform WidenScalar action on the target.
158 bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const;
159
160 /// \return true if \p Query is legal on the target, or if \p Query will
161 /// perform a FewerElements action on the target.
162 bool isLegalOrHasFewerElements(const LegalityQuery &Query) const;
163
164 /// \return true if the combine is running prior to legalization, or if \p Ty
165 /// is a legal integer constant type on the target.
166 bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;
167
168 /// Calls MachineRegisterInfo::replaceRegWith() and informs the observer of the changes.
169 void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
170
171 /// Replace a single register operand with a new register and inform the
172 /// observer of the changes.
174 Register ToReg) const;
175
176 /// Replace the opcode in instruction with a new opcode and inform the
177 /// observer of the changes.
178 void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;
179
180 /// Get the register bank of \p Reg.
181 /// If Reg has not been assigned a register, a register class,
182 /// or a register bank, then this returns nullptr.
183 ///
184 /// \pre Reg.isValid()
185 const RegisterBank *getRegBank(Register Reg) const;
186
187 /// Set the register bank of \p Reg.
188 /// Does nothing if the RegBank is null.
189 /// This is the counterpart to getRegBank.
190 void setRegBank(Register Reg, const RegisterBank *RegBank) const;
191
192 /// If \p MI is COPY, try to combine it.
193 /// Returns true if MI changed.
194 bool tryCombineCopy(MachineInstr &MI) const;
195 bool matchCombineCopy(MachineInstr &MI) const;
196 void applyCombineCopy(MachineInstr &MI) const;
197
198 /// Returns true if \p DefMI precedes \p UseMI or they are the same
199 /// instruction. Both must be in the same basic block.
200 bool isPredecessor(const MachineInstr &DefMI,
201 const MachineInstr &UseMI) const;
202
203 /// Returns true if \p DefMI dominates \p UseMI. By definition an
204 /// instruction dominates itself.
205 ///
206 /// If we haven't been provided with a MachineDominatorTree during
207 /// construction, this function returns a conservative result that tracks just
208 /// a single basic block.
209 bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const;
210
211 /// If \p MI is extend that consumes the result of a load, try to combine it.
212 /// Returns true if MI changed.
215 PreferredTuple &MatchInfo) const;
217 PreferredTuple &MatchInfo) const;
218
219 /// Match (and (load x), mask) -> zextload x
221 BuildFnTy &MatchInfo) const;
222
223 /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
224 /// load.
226 BuildFnTy &MatchInfo) const;
227
229 IndexedLoadStoreMatchInfo &MatchInfo) const;
231 IndexedLoadStoreMatchInfo &MatchInfo) const;
232
235
236 /// Match sext_inreg(load p), imm -> sextload p
238 std::tuple<Register, unsigned> &MatchInfo) const;
240 std::tuple<Register, unsigned> &MatchInfo) const;
241
242 /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
243 /// when their source operands are identical.
244 bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
245 void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
246
247 /// If a brcond's true block is not the fallthrough, make it so by inverting
248 /// the condition and swapping operands.
250 MachineInstr *&BrCond) const;
252 MachineInstr *&BrCond) const;
253
254 /// If \p MI is G_CONCAT_VECTORS, try to combine it.
255 /// Returns true if MI changed.
256 /// Right now, we support:
257 /// - concat_vector(undef, undef) => undef
258 /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
259 /// build_vector(A, B, C, D)
260 /// ==========================================================
261 /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
262 /// can be flattened into a build_vector.
263 /// In the first case \p Ops will be empty
264 /// In the second case \p Ops will contain the operands
265 /// needed to produce the flattened build_vector.
266 ///
267 /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
270 /// Replace \p MI with a flattened build_vector with \p Ops
271 /// or an implicit_def if \p Ops is empty.
274
277 /// Replace \p MI with a flattened build_vector with \p Ops
278 /// or an implicit_def if \p Ops is empty.
281
282 /// Replace \p MI with a build_vector.
284
285 /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
286 /// Returns true if MI changed.
287 ///
288 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
290 /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
291 /// concat_vectors.
292 /// \p Ops will contain the operands needed to produce the flattened
293 /// concat_vectors.
294 ///
295 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
298 /// Replace \p MI with a concat_vectors with \p Ops.
300 ArrayRef<Register> Ops) const;
301
302 /// Optimize memcpy intrinsics et al, e.g. constant len calls.
303 /// \p MaxLen if non-zero specifies the max length of a mem libcall to inline.
304 ///
305 /// For example (pre-indexed):
306 ///
307 /// $addr = G_PTR_ADD $base, $offset
308 /// [...]
309 /// $val = G_LOAD $addr
310 /// [...]
311 /// $whatever = COPY $addr
312 ///
313 /// -->
314 ///
315 /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
316 /// [...]
317 /// $whatever = COPY $addr
318 ///
319 /// or (post-indexed):
320 ///
321 /// G_STORE $val, $base
322 /// [...]
323 /// $addr = G_PTR_ADD $base, $offset
324 /// [...]
325 /// $whatever = COPY $addr
326 ///
327 /// -->
328 ///
329 /// $addr = G_INDEXED_STORE $val, $base, $offset
330 /// [...]
331 /// $whatever = COPY $addr
332 bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0) const;
333
334 bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
335 void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
336
337 /// Fold (shift (shift base, x), y) -> (shift base (x+y))
338 bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
339 void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
340
341 /// If we have a shift-by-constant of a bitwise logic op that itself has a
342 /// shift-by-constant operand with identical opcode, we may be able to convert
343 /// that into 2 independent shifts followed by the logic op.
345 ShiftOfShiftedLogic &MatchInfo) const;
347 ShiftOfShiftedLogic &MatchInfo) const;
348
349 bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
350
351 /// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
353 MachineInstr &ShiftMI) const;
355 LshrOfTruncOfLshr &MatchInfo) const;
356
357 /// Transform a multiply by a power-of-2 value to a left shift.
358 bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
359 void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
360
361 // Transform a G_SUB with constant on the RHS to G_ADD.
362 bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
363
364 // Transform a G_SHL with an extended source into a narrower shift if
365 // possible.
367 RegisterImmPair &MatchData) const;
369 const RegisterImmPair &MatchData) const;
370
371 /// Fold away a merge of an unmerge of the corresponding values.
372 bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const;
373
374 /// Reduce a shift by a constant to an unmerge and a shift on a half sized
375 /// type. This will not produce a shift smaller than \p TargetShiftSize.
376 bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
377 unsigned &ShiftVal) const;
379 const unsigned &ShiftVal) const;
381 unsigned TargetShiftAmount) const;
382
383 /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
385 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
387 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
388
389 /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
391 SmallVectorImpl<APInt> &Csts) const;
393 SmallVectorImpl<APInt> &Csts) const;
394
395 /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
398 std::function<void(MachineIRBuilder &)> &MatchInfo) const;
399
400 /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
403
404 /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
407
408 /// Transform fp_instr(cst) to constant result of the fp operation.
410 const ConstantFP *Cst) const;
411
412 /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
415
416 /// Transform PtrToInt(IntToPtr(x)) to x.
418
419 /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
420 /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
421 bool
423 std::pair<Register, bool> &PtrRegAndCommute) const;
424 void
426 std::pair<Register, bool> &PtrRegAndCommute) const;
427
428 // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
431
432 /// Transform anyext(trunc(x)) to x.
434
435 /// Transform zext(trunc(x)) to x.
437
438 /// Transform trunc (shl x, K) to shl (trunc x), K
439 /// if K < VT.getScalarSizeInBits().
440 ///
441 /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
442 /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
443 /// MidVT is obtained by finding a legal type between the trunc's src and dst
444 /// types.
445 bool
447 std::pair<MachineInstr *, LLT> &MatchInfo) const;
448 void
450 std::pair<MachineInstr *, LLT> &MatchInfo) const;
451
452 /// Return true if any explicit use operand on \p MI is defined by a
453 /// G_IMPLICIT_DEF.
455
456 /// Return true if all register explicit use operands on \p MI are defined by
457 /// a G_IMPLICIT_DEF.
459
460 /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
462
463 /// Return true if a G_STORE instruction \p MI is storing an undef value.
464 bool matchUndefStore(MachineInstr &MI) const;
465
466 /// Return true if a G_SELECT instruction \p MI has an undef comparison.
468
469 /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
471
472 /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
473 /// true, \p OpIdx will store the operand index of the known selected value.
474 bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const;
475
476 /// Replace an instruction with a G_FCONSTANT with value \p C.
477 void replaceInstWithFConstant(MachineInstr &MI, double C) const;
478
479 /// Replace an instruction with an G_FCONSTANT with value \p CFP.
481
482 /// Replace an instruction with a G_CONSTANT with value \p C.
483 void replaceInstWithConstant(MachineInstr &MI, int64_t C) const;
484
485 /// Replace an instruction with a G_CONSTANT with value \p C.
487
488 /// Replace an instruction with a G_IMPLICIT_DEF.
490
491 /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
493
494 /// Delete \p MI and replace all of its uses with \p Replacement.
496 Register Replacement) const;
497
498 /// @brief Replaces the shift amount in \p MI with ShiftAmt % BW
499 /// @param MI
501
502 /// Return true if \p MOP1 and \p MOP2 are register operands defined by
503 /// equivalent instructions.
504 bool matchEqualDefs(const MachineOperand &MOP1,
505 const MachineOperand &MOP2) const;
506
507 /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value equal to
508 /// \p C.
509 bool matchConstantOp(const MachineOperand &MOP, int64_t C) const;
510
511 /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value exactly
512 /// equal to \p C.
513 bool matchConstantFPOp(const MachineOperand &MOP, double C) const;
514
515 /// @brief Checks if constant at \p ConstIdx is larger than \p MI 's bitwidth
516 /// @param ConstIdx Index of the constant
517 bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const;
518
519 /// Optimize (cond ? x : x) -> x
521
522 /// Optimize (x op x) -> x
523 bool matchBinOpSameVal(MachineInstr &MI) const;
524
525 /// Check if operand \p OpIdx is undef.
526 bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
527
528 /// Check if operand \p OpIdx is known to be a power of 2.
530 unsigned OpIdx) const;
531
532 /// Erase \p MI
533 void eraseInst(MachineInstr &MI) const;
534
535 /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
537 std::tuple<Register, Register> &MatchInfo) const;
539 std::tuple<Register, Register> &MatchInfo) const;
540
541 /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
543 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const;
544
545 /// Replace \p MI with a series of instructions described in \p MatchInfo.
547 InstructionStepsMatchInfo &MatchInfo) const;
548
549 /// Match ashr (shl x, C), C -> sext_inreg (C)
551 std::tuple<Register, int64_t> &MatchInfo) const;
553 std::tuple<Register, int64_t> &MatchInfo) const;
554
555 /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
556 bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
557
558 /// \return true if \p MI is a G_AND instruction whose operands are x and y
559 /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
560 ///
561 /// \param [in] MI - The G_AND instruction.
562 /// \param [out] Replacement - A register the G_AND should be replaced with on
563 /// success.
564 bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const;
565
566 /// \return true if \p MI is a G_OR instruction whose operands are x and y
567 /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
568 /// value.)
569 ///
570 /// \param [in] MI - The G_OR instruction.
571 /// \param [out] Replacement - A register the G_OR should be replaced with on
572 /// success.
573 bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const;
574
575 /// \return true if \p MI is a G_SEXT_INREG that can be erased.
577
578 /// Combine inverting a result of a compare into the opposite cond code.
580 SmallVectorImpl<Register> &RegsToNegate) const;
582 SmallVectorImpl<Register> &RegsToNegate) const;
583
584 /// Fold (xor (and x, y), y) -> (and (not x), y)
585 ///{
587 std::pair<Register, Register> &MatchInfo) const;
589 std::pair<Register, Register> &MatchInfo) const;
590 ///}
591
592 /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
593 bool matchPtrAddZero(MachineInstr &MI) const;
594 void applyPtrAddZero(MachineInstr &MI) const;
595
596 /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
598
599 /// Push a binary operator through a select on constants.
600 ///
601 /// binop (select cond, K0, K1), K2 ->
602 /// select cond, (binop K0, K2), (binop K1, K2)
603 bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const;
605 const unsigned &SelectOpNo) const;
606
608 SmallVectorImpl<Register> &MatchInfo) const;
609
611 SmallVectorImpl<Register> &MatchInfo) const;
612
613 /// Match expression trees of the form
614 ///
615 /// \code
616 /// sN *a = ...
617 /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
618 /// \endcode
619 ///
620 /// And check if the tree can be replaced with a M-bit load + possibly a
621 /// bswap.
622 bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const;
623
626
629
632 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
635 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
636
637 /// Use a function which takes in a MachineIRBuilder to perform a combine.
638 /// By default, it erases the instruction \p MI from the function.
639 void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const;
640 /// Use a function which takes in a MachineIRBuilder to perform a combine.
641 /// This variant does not erase \p MI after calling the build function.
642 void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const;
643
644 bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants,
645 BuildFnTy &MatchInfo) const;
650
652 Register &UnmergeSrc) const;
655 Register &UnmergeSrc) const;
656
657 bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
658 void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
659
660 /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
661 /// or false constant based off of KnownBits information.
663 int64_t &MatchInfo) const;
664
665 /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
666 /// KnownBits information.
667 bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const;
668
669 /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
670 bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
671
673 BuildFnTy &MatchInfo) const;
674 /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
676 BuildFnTy &MatchInfo) const;
677
678 /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
680 BuildFnTy &MatchInfo) const;
681
682 /// Match: shr (and x, n), k -> ubfx x, pos, width
684 BuildFnTy &MatchInfo) const;
685
686 // Helpers for reassociation:
688 BuildFnTy &MatchInfo) const;
691 BuildFnTy &MatchInfo) const;
694 BuildFnTy &MatchInfo) const;
695 /// Reassociate pointer calculations with G_ADD involved, to allow better
696 /// addressing mode usage.
697 bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
698
699 /// Try to reassociate to reassociate operands of a commutative binop.
700 bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
701 Register Op1, BuildFnTy &MatchInfo) const;
702 /// Reassociate commutative binary operations like G_ADD.
703 bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const;
704
705 /// Do constant folding when opportunities are exposed after MIR building.
706 bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const;
707
708 /// Do constant folding when opportunities are exposed after MIR building.
709 bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const;
710
711 /// Do constant FP folding when opportunities are exposed after MIR building.
712 bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const;
713
714 /// Constant fold G_FMA/G_FMAD.
715 bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const;
716
717 /// \returns true if it is possible to narrow the width of a scalar binop
718 /// feeding a G_AND instruction \p MI.
719 bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
720
721 /// Given an G_UDIV \p MI or G_UREM \p MI expressing a divide by constant,
722 /// return an expression that implements it by multiplying by a magic number.
723 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
725 /// Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
728
729 /// Given an G_SDIV \p MI or G_SREM \p MI expressing a signed divide by
730 /// constant, return an expression that implements it by multiplying by a
731 /// magic number. Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's
732 /// Guide".
734 /// Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
737
738 /// Given an G_SDIV \p MI expressing a signed divided by a pow2 constant,
739 /// return expressions that implement it by shifting.
740 bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const;
741 void applySDivByPow2(MachineInstr &MI) const;
742 /// Given an G_UDIV \p MI expressing an unsigned divided by a pow2 constant,
743 /// return expressions that implement it by shifting.
744 void applyUDivByPow2(MachineInstr &MI) const;
745
746 // G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
747 bool matchUMulHToLShr(MachineInstr &MI) const;
748 void applyUMulHToLShr(MachineInstr &MI) const;
749
750 // Combine trunc(smin(smax(x, C1), C2)) -> truncssat_s(x)
751 // or trunc(smax(smin(x, C2), C1)) -> truncssat_s(x).
752 bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
753 void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
754
755 // Combine trunc(smin(smax(x, 0), C)) -> truncssat_u(x)
756 // or trunc(smax(smin(x, C), 0)) -> truncssat_u(x)
757 // or trunc(umin(smax(x, 0), C)) -> truncssat_u(x)
758 bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
759 void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
760
761 // Combine trunc(umin(x, C)) -> truncusat_u(x).
762 bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const;
763
764 // Combine truncusat_u(fptoui(x)) -> fptoui_sat(x)
766
767 /// Try to transform \p MI by using all of the above
768 /// combine functions. Returns true if changed.
770
771 /// Emit loads and stores that perform the given memcpy.
772 /// Assumes \p MI is a G_MEMCPY_INLINE
773 /// TODO: implement dynamically sized inline memcpy,
774 /// and rename: s/bool tryEmit/void emit/
776
777 /// Match:
778 /// (G_UMULO x, 2) -> (G_UADDO x, x)
779 /// (G_SMULO x, 2) -> (G_SADDO x, x)
780 bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const;
781
782 /// Match:
783 /// (G_*MULO x, 0) -> 0 + no carry out
784 bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const;
785
786 /// Match:
787 /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
788 /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
789 bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const;
790
791 /// Transform (fadd x, fneg(y)) -> (fsub x, y)
792 /// (fadd fneg(x), y) -> (fsub y, x)
793 /// (fsub x, fneg(y)) -> (fadd x, y)
794 /// (fmul fneg(x), fneg(y)) -> (fmul x, y)
795 /// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
796 /// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
797 /// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
798 bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const;
799
800 bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
801 void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
802
803 bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
804 bool &HasFMAD, bool &Aggressive,
805 bool CanReassociate = false) const;
806
807 /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
808 /// (fadd (fmul x, y), z) -> (fmad x, y, z)
810 BuildFnTy &MatchInfo) const;
811
812 /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
813 /// (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
815 BuildFnTy &MatchInfo) const;
816
817 /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
818 /// (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
820 BuildFnTy &MatchInfo) const;
821
822 // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
823 // -> (fma x, y, (fma (fpext u), (fpext v), z))
824 // (fadd (fmad x, y, (fpext (fmul u, v))), z)
825 // -> (fmad x, y, (fmad (fpext u), (fpext v), z))
826 bool
828 BuildFnTy &MatchInfo) const;
829
830 /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
831 /// (fsub (fmul x, y), z) -> (fmad x, y, -z)
833 BuildFnTy &MatchInfo) const;
834
835 /// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
836 /// (fsub (fneg (fmul, x, y)), z) -> (fmad (fneg x), y, (fneg z))
838 BuildFnTy &MatchInfo) const;
839
840 /// Transform (fsub (fpext (fmul x, y)), z)
841 /// -> (fma (fpext x), (fpext y), (fneg z))
842 /// (fsub (fpext (fmul x, y)), z)
843 /// -> (fmad (fpext x), (fpext y), (fneg z))
845 BuildFnTy &MatchInfo) const;
846
847 /// Transform (fsub (fpext (fneg (fmul x, y))), z)
848 /// -> (fneg (fma (fpext x), (fpext y), z))
849 /// (fsub (fpext (fneg (fmul x, y))), z)
850 /// -> (fneg (fmad (fpext x), (fpext y), z))
852 BuildFnTy &MatchInfo) const;
853
854 bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const;
855
857 SmallVector<MachineInstr *> &MatchInfo) const;
859
860 /// Transform G_ADD(x, G_SUB(y, x)) to y.
861 /// Transform G_ADD(G_SUB(y, x), x) to y.
862 bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const;
863
865 Register &MatchInfo) const;
866 bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const;
868 Register &MatchInfo) const;
869
870 /// Transform:
871 /// (x + y) - y -> x
872 /// (x + y) - x -> y
873 /// x - (y + x) -> 0 - y
874 /// x - (x + z) -> 0 - z
875 bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
876
877 /// \returns true if it is possible to simplify a select instruction \p MI
878 /// to a min/max instruction of some sort.
880 BuildFnTy &MatchInfo) const;
881
882 /// Transform:
883 /// (X + Y) == X -> Y == 0
884 /// (X - Y) == X -> Y == 0
885 /// (X ^ Y) == X -> Y == 0
886 /// (X + Y) != X -> Y != 0
887 /// (X - Y) != X -> Y != 0
888 /// (X ^ Y) != X -> Y != 0
890 BuildFnTy &MatchInfo) const;
891
892 /// Match shifts greater or equal to the range (the bitwidth of the result
893 /// datatype, or the effective bitwidth of the source value).
895 std::optional<int64_t> &MatchInfo) const;
896
897 /// Match constant LHS ops that should be commuted.
899
900 /// Combine sext of trunc.
901 bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
902
903 /// Combine zext of trunc.
904 bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
905
906 /// Combine zext nneg to sext.
907 bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
908
909 /// Match constant LHS FP ops that should be commuted.
911
912 // Given a binop \p MI, commute operands 1 and 2.
914
915 /// Combine select to integer min/max.
916 bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
917
918 /// Transform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
919 bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const;
920
921 /// Combine selects.
922 bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const;
923
924 /// Combine ands.
925 bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
926
927 /// Combine ors.
928 bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const;
929
930 /// trunc (binop X, C) --> binop (trunc X, trunc C).
931 bool matchNarrowBinop(const MachineInstr &TruncMI,
932 const MachineInstr &BinopMI,
933 BuildFnTy &MatchInfo) const;
934
935 bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const;
936
937 /// Combine addos.
938 bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const;
939
940 /// Combine extract vector element.
941 bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const;
942
943 /// Combine extract vector element with a build vector on the vector register.
945 const MachineInstr &MI2,
946 BuildFnTy &MatchInfo) const;
947
948 /// Combine extract vector element with a build vector trunc on the vector
949 /// register.
950 bool
952 BuildFnTy &MatchInfo) const;
953
954 /// Combine extract vector element with a shuffle vector on the vector
955 /// register.
957 const MachineInstr &MI2,
958 BuildFnTy &MatchInfo) const;
959
960 /// Combine extract vector element with a insert vector element on the vector
961 /// register and different indices.
962 bool
964 BuildFnTy &MatchInfo) const;
965
966 /// Remove references to rhs if it is undef
967 bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const;
968
969 /// Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not
970 /// reference a.
971 bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
972
973 /// Use a function which takes in a MachineIRBuilder to perform a combine.
974 /// By default, it erases the instruction def'd on \p MO from the function.
975 void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
976
977 /// Match FPOWI if it's safe to extend it into a series of multiplications.
978 bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const;
979
980 /// Expands FPOWI into a series of multiplications and a division if the
981 /// exponent is negative.
982 void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const;
983
984 /// Combine insert vector element OOB.
986 BuildFnTy &MatchInfo) const;
987
989 BuildFnTy &MatchInfo) const;
990
991 bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
992
993 bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
994
995 bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
996
997 bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
998
999 /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
1000 bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
1001 BuildFnTy &MatchInfo) const;
1002
1003 bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
1004 BuildFnTy &MatchInfo) const;
1006 BuildFnTy &MatchInfo) const;
1007
1009 BuildFnTy &MatchInfo) const;
1010
1012 BuildFnTy &MatchInfo) const;
1013
1015 BuildFnTy &MatchInfo) const;
1016
1017 // fold ((A-C1)+C2) -> (A+(C2-C1))
1018 bool matchFoldAMinusC1PlusC2(const MachineInstr &MI,
1019 BuildFnTy &MatchInfo) const;
1020
1021 bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI,
1022 BuildFnTy &MatchInfo) const;
1023
1024 bool matchCastOfBuildVector(const MachineInstr &CastMI,
1025 const MachineInstr &BVMI,
1026 BuildFnTy &MatchInfo) const;
1027
1028 bool matchCanonicalizeICmp(const MachineInstr &MI,
1029 BuildFnTy &MatchInfo) const;
1030 bool matchCanonicalizeFCmp(const MachineInstr &MI,
1031 BuildFnTy &MatchInfo) const;
1032
1033 // unmerge_values(anyext(build vector)) -> build vector(anyext)
1034 bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI,
1035 BuildFnTy &MatchInfo) const;
1036
1037 // merge_values(_, undef) -> anyext
1038 bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1039
1040 // merge_values(_, zero) -> zext
1041 bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1042
1043 // overflow sub
1044 bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1045
1046 // (sext_inreg (sext_inreg x, K0), K1)
1047 bool matchRedundantSextInReg(MachineInstr &Root, MachineInstr &Other,
1048 BuildFnTy &MatchInfo) const;
1049
1050 // (ctlz (xor x, (sra x, bitwidth-1))) -> (add (ctls x), 1) or
1051 // (ctlz (or (shl (xor x, (sra x, bitwidth-1)), 1), 1) -> (ctls x)
1052 bool matchCtls(MachineInstr &CtlzMI, BuildFnTy &MatchInfo) const;
1053
1054private:
1055 /// Checks for legality of an indexed variant of \p LdSt.
1056 bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
1057 /// Given a non-indexed load or store instruction \p MI, find an offset that
1058 /// can be usefully and legally folded into it as a post-indexing operation.
1059 ///
1060 /// \returns true if a candidate is found.
1061 bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
1062 Register &Offset, bool &RematOffset) const;
1063
1064 /// Given a non-indexed load or store instruction \p MI, find an offset that
1065 /// can be usefully and legally folded into it as a pre-indexing operation.
1066 ///
1067 /// \returns true if a candidate is found.
1068 bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
1069 Register &Offset) const;
1070
1071 /// Helper function for matchLoadOrCombine. Searches for Registers
1072 /// which may have been produced by a load instruction + some arithmetic.
1073 ///
1074 /// \param [in] Root - The search root.
1075 ///
1076 /// \returns The Registers found during the search.
1077 std::optional<SmallVector<Register, 8>>
1078 findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
1079
1080 /// Helper function for matchLoadOrCombine.
1081 ///
1082 /// Checks if every register in \p RegsToVisit is defined by a load
1083 /// instruction + some arithmetic.
1084 ///
1085 /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
1086 /// at to the index of the load.
1087 /// \param [in] MemSizeInBits - The number of bits each load should produce.
1088 ///
1089 /// \returns On success, a 3-tuple containing lowest-index load found, the
1090 /// lowest index, and the last load in the sequence.
1091 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
1092 findLoadOffsetsForLoadOrCombine(
1093 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
1094 const SmallVector<Register, 8> &RegsToVisit,
1095 const unsigned MemSizeInBits) const;
1096
1097 /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
1098 /// a re-association of its operands would break an existing legal addressing
1099 /// mode that the address computation currently represents.
1100 bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd) const;
1101
1102 /// Behavior when a floating point min/max is given one NaN and one
1103 /// non-NaN as input.
1104 enum class SelectPatternNaNBehaviour {
1105 NOT_APPLICABLE = 0, /// NaN behavior not applicable.
1106 RETURNS_NAN, /// Given one NaN input, returns the NaN.
1107 RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
1108 RETURNS_ANY /// Given one NaN input, can return either (or both operands are
1109 /// known non-NaN.)
1110 };
1111
1112 /// \returns which of \p LHS and \p RHS would be the result of a non-equality
1113 /// floating point comparison where one of \p LHS and \p RHS may be NaN.
1114 ///
1115 /// If both \p LHS and \p RHS may be NaN, returns
1116 /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
1117 SelectPatternNaNBehaviour
1118 computeRetValAgainstNaN(Register LHS, Register RHS,
1119 bool IsOrderedComparison) const;
1120
1121 /// Determines the floating point min/max opcode which should be used for
1122 /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
1123 ///
1124 /// \returns 0 if this G_SELECT should not be combined to a floating point
1125 /// min or max. If it should be combined, returns one of
1126 ///
1127 /// * G_FMAXNUM
1128 /// * G_FMAXIMUM
1129 /// * G_FMINNUM
1130 /// * G_FMINIMUM
1131 ///
1132 /// Helper function for matchFPSelectToMinMax.
1133 unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
1134 SelectPatternNaNBehaviour VsNaNRetVal) const;
1135
1136 /// Handle floating point cases for matchSimplifySelectToMinMax.
1137 ///
1138 /// E.g.
1139 ///
1140 /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
1141 /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
1142 bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
1143 Register FalseVal, BuildFnTy &MatchInfo) const;
1144
1145 /// Try to fold selects to logical operations.
1146 bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo) const;
1147
1148 bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo) const;
1149
1150 bool isOneOrOneSplat(Register Src, bool AllowUndefs) const;
1151 bool isZeroOrZeroSplat(Register Src, bool AllowUndefs) const;
1152 bool isConstantSplatVector(Register Src, int64_t SplatValue,
1153 bool AllowUndefs) const;
1154 bool isConstantOrConstantVectorI(Register Src) const;
1155
1156 std::optional<APInt> getConstantOrConstantSplatVector(Register Src) const;
1157
1158 /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
1159 /// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
1160 /// into a single comparison using range-based reasoning.
1161 bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
1162 BuildFnTy &MatchInfo) const;
1163
1164 // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
1165 bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const;
1166
1167 bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;
1168
1169 bool constantFoldICmp(const GICmp &ICmp, const GIConstant &LHSCst,
1170 const GIConstant &RHSCst, BuildFnTy &MatchInfo) const;
1171 bool constantFoldFCmp(const GFCmp &FCmp, const GFConstant &LHSCst,
1172 const GFConstant &RHSCst, BuildFnTy &MatchInfo) const;
1173};
1174} // namespace llvm
1175
1176#endif
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Register Bank Select
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file defines the DenseMap class.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Implement a low-level type suitable for MachineInstr level instruction selection.
Register Reg
MachineInstr unsigned OpIdx
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRepeatedFPDivisor(MachineInstr &MI, SmallVector< MachineInstr * > &MatchInfo) const
bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match expression trees of the form.
bool tryCombine(MachineInstr &MI) const
Try to transform MI by using all of the above combine functions.
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
void applyPtrAddZero(MachineInstr &MI) const
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2) const
Return true if MOP1 and MOP2 are register operands are defined by equivalent instructions.
void applyUDivOrURemByConst(MachineInstr &MI) const
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCtls(MachineInstr &CtlzMI, BuildFnTy &MatchInfo) const
bool matchSelectSameVal(MachineInstr &MI) const
Optimize (cond ? x : x) -> x.
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
void applySimplifyURemByPow2(MachineInstr &MI) const
Combine G_UREM x, (known power of 2) to an add and bitmasking.
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
bool matchPtrAddZero(MachineInstr &MI) const
}
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false) const
bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
bool matchShiftsTooBig(MachineInstr &MI, std::optional< int64_t > &MatchInfo) const
Match shifts greater or equal to the range (the bitwidth of the result datatype, or the effective bit...
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement) const
Delete MI and replace all of its uses with Replacement.
void applyCombineShuffleToBuildVector(MachineInstr &MI) const
Replace MI with a build_vector.
bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext of trunc.
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate commutative binary operations like G_ADD.
bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector trunc on the vector register.
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCommuteConstantToRHS(MachineInstr &MI) const
Match constant LHS ops that should be commuted.
const DataLayout & getDataLayout() const
bool matchBinOpSameVal(MachineInstr &MI) const
Optimize (x op x) -> x.
bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext nneg to sext.
void applyUMulHToLShr(MachineInstr &MI) const
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
bool isLegalOrHasFewerElements(const LegalityQuery &Query) const
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
Fold (shift (shift base, x), y) -> (shift base (x+y))
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI precedes UseMI or they are the same instruction.
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
const TargetLowering & getTargetLowering() const
bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with an insert vector element on the vector register and different indi...
bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const
Remove references to rhs if it is undef.
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Replace MI with a series of instructions described in MatchInfo.
void applySDivByPow2(MachineInstr &MI) const
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
void applyUDivByPow2(MachineInstr &MI) const
Given an G_UDIV MI expressing an unsigned divided by a pow2 constant, return expressions that impleme...
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ors.
bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo, MachineInstr &ShiftMI) const
Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine insert vector element OOB.
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
Return true if MI is a G_ADD which can be simplified to a G_SUB.
void replaceInstWithConstant(MachineInstr &MI, int64_t C) const
Replace an instruction with a G_CONSTANT with value C.
bool tryEmitMemcpyInline(MachineInstr &MI) const
Emit loads and stores that perform the given memcpy.
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const
Checks if constant at ConstIdx is larger than MI 's bitwidth.
GISelValueTracking * getValueTracking() const
void applyCombineCopy(MachineInstr &MI) const
bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine extract vector element.
bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine sext of trunc.
bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const
Transform G_ADD(x, G_SUB(y, x)) to y.
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) const
bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
bool matchSextTruncSextLoad(MachineInstr &MI) const
bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const
Fold away a merge of an unmerge of the corresponding values.
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineBuildUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI, Register &UnmergeSrc) const
bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const
Given an G_SDIV MI expressing a signed divided by a pow2 constant, return expressions that implements...
bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match (and (load x), mask) -> zextload x.
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
bool matchCombineCopy(MachineInstr &MI) const
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
Fold (xor (and x, y), y) -> (and (not x), y) {.
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops) const
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI, BuildFnTy &MatchInfo) const
Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
void replaceInstWithFConstant(MachineInstr &MI, double C) const
Replace an instruction with a G_FCONSTANT with value C.
bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchFunnelShiftToRotate(MachineInstr &MI) const
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants, BuildFnTy &MatchInfo) const
bool matchRedundantSExtInReg(MachineInstr &MI) const
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
void applyFunnelShiftConstantModulo(MachineInstr &MI) const
Replaces the shift amount in MI with ShiftAmt % BW.
bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData) const
void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, GISelValueTracking *VT=nullptr, MachineDominatorTree *MDT=nullptr, const LegalizerInfo *LI=nullptr)
bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not reference a.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
Transform a multiply by a power-of-2 value to a left shift.
void applyCombineShuffleVector(MachineInstr &MI, ArrayRef< Register > Ops) const
Replace MI with a concat_vectors with Ops.
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo) const
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo) const
SelectOperand is the operand in binary operator MI that is the select to fold.
bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
bool tryCombineCopy(MachineInstr &MI) const
If MI is COPY, try to combine it.
bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI, BuildFnTy &MatchInfo) const
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
bool matchCanonicalizeFCmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchUndefShuffleVectorMask(MachineInstr &MI) const
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is known to be a power of 2.
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
If MI is G_CONCAT_VECTORS, try to combine it.
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
bool matchExtractVectorElementWithShuffleVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a shuffle vector on the vector register.
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
LLVMContext & getContext() const
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
Combine inverting a result of a compare into the opposite cond code.
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
Match sext_inreg(load p), imm -> sextload p.
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine select to integer min/max.
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst) const
Transform fp_instr(cst) to constant result of the fp operation.
bool isLegal(const LegalityQuery &Query) const
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo) const
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo) const
Try to reassociate to reassociate operands of a commutative binop.
void eraseInst(MachineInstr &MI) const
Erase MI.
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const
Do constant FP folding when opportunities are exposed after MIR building.
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
bool matchUndefStore(MachineInstr &MI) const
Return true if a G_STORE instruction MI is storing an undef value.
MachineRegisterInfo & MRI
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const
Transform PtrToInt(IntToPtr(x)) to x.
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
bool matchConstantFPOp(const MachineOperand &MOP, double C) const
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
MachineInstr * buildUDivOrURemUsingMul(MachineInstr &MI) const
Given an G_UDIV MI or G_UREM MI expressing a divide by constant, return an expression that implements...
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const
Push a binary operator through a select on constants.
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount) const
bool tryCombineExtendingLoads(MachineInstr &MI) const
If MI is extend that consumes the result of a load, try to combine it.
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo) const
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (and x, n), k -> ubfx x, pos, width.
void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
bool tryCombineShuffleVector(MachineInstr &MI) const
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
void applyRotateOutOfRange(MachineInstr &MI) const
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool matchNarrowBinop(const MachineInstr &TruncMI, const MachineInstr &BinopMI, BuildFnTy &MatchInfo) const
trunc (binop X, C) --> binop (trunc X, trunc C).
bool matchUndefSelectCmp(MachineInstr &MI) const
Return true if a G_SELECT instruction MI has an undef comparison.
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
void replaceInstWithUndef(MachineInstr &MI) const
Replace an instruction with a G_IMPLICIT_DEF.
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (X + Y) == X -> Y == 0 (X - Y) == X -> Y == 0 (X ^ Y) == X -> Y == 0 (X + Y) !...
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine addos.
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine selects.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchRotateOutOfRange(MachineInstr &MI) const
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
void setRegBank(Register Reg, const RegisterBank *RegBank) const
Set the register bank of Reg.
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const
Return true if a G_SELECT instruction MI has a constant comparison.
bool matchCommuteFPConstantToRHS(MachineInstr &MI) const
Match constant LHS FP ops that should be commuted.
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const
bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const
void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
bool matchConstantOp(const MachineOperand &MOP, int64_t C) const
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
const LegalizerInfo * LI
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
void applyCombineBuildUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B, Register &UnmergeSrc) const
bool matchUMulHToLShr(MachineInstr &MI) const
MachineDominatorTree * MDT
MachineIRBuilder & getBuilder() const
void applyFunnelShiftToRotate(MachineInstr &MI) const
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyRepeatedFPDivisor(SmallVector< MachineInstr * > &MatchInfo) const
bool matchTruncUSatUToFPTOUISat(MachineInstr &MI, MachineInstr &SrcMI) const
const RegisterBankInfo * RBI
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*MULO x, 0) -> 0 + no carry out.
GISelValueTracking * VT
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
const TargetRegisterInfo * TRI
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI dominates UseMI.
GISelChangeObserver & Observer
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal) const
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
bool matchUDivOrURemByConst(MachineInstr &MI) const
Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ands.
bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRedundantSextInReg(MachineInstr &Root, MachineInstr &Other, BuildFnTy &MatchInfo) const
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const
Constant fold G_FMA/G_FMAD.
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) (fsub (fneg (fmul,...
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg) const
Transform zext(trunc(x)) to x.
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is undef.
void applyLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo) const
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0) const
Optimize memcpy intrinsics et al, e.g.
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applySDivOrSRemByConst(MachineInstr &MI) const
MachineInstr * buildSDivOrSRemUsingMul(MachineInstr &MI) const
Given a G_SDIV MI or G_SREM MI expressing a signed divide by constant, return an expression that imp...
bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const
bool matchCanonicalizeICmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCastOfBuildVector(const MachineInstr &CastMI, const MachineInstr &BVMI, BuildFnTy &MatchInfo) const
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (x + y) - y -> x (x + y) - x -> y x - (y + x) -> 0 - y x - (x + z) -> 0 - z.
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) const
Transform anyext(trunc(x)) to x.
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
MachineIRBuilder & Builder
void applyCommuteBinOpOperands(MachineInstr &MI) const
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const
Delete MI and replace all of its uses with its OpIdx-th operand.
void applySextTruncSextLoad(MachineInstr &MI) const
const MachineFunction & getMachineFunction() const
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVectorElementWithBuildVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector on the vector register.
bool matchSDivOrSRemByConst(MachineInstr &MI) const
Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal) const
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI, BuildFnTy &MatchInfo) const
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const
Match FPOWI if it's safe to extend it into a series of multiplications.
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
Match ashr (shl x, C), C -> sext_inreg (C)
void applyCombineUnmergeZExtToZExt(MachineInstr &MI) const
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
Represent a G_FCMP.
A floating-point-like constant.
Definition Utils.h:686
Represent a G_ICMP.
An integer-like constant.
Definition Utils.h:647
Abstract class that contains various methods for clients to notify about changes.
Represents any type of generic load or store.
Represents a logical binary operation.
Represents a G_PTR_ADD.
Represents a G_SELECT.
Represents a G_ZEXTLOAD.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Helper class to build MachineInstr.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Holds all the information related to register banks.
This class implements the register bank concept.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
std::function< void(MachineIRBuilder &)> BuildFnTy
SmallVector< std::function< void(MachineInstrBuilder &)>, 4 > OperandBuildSteps
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Other
Any other memory.
Definition ModRef.h:68
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
InstructionBuildSteps()=default
Operands to be added to the instruction.
OperandBuildSteps OperandFns
The opcode for the produced instruction.
InstructionStepsMatchInfo(std::initializer_list< InstructionBuildSteps > InstrsToBuild)
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
const RegisterBank * Bank