LLVM 20.0.0git
CombinerHelper.h
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===--------------------------------------------------------------------===//
8/// \file
9/// This contains common combine transformations that may be used in a combine
10/// pass, or by the target elsewhere.
11/// Targets can pick individual opcode transformations from the helper or use
12/// tryCombine which invokes all transformations. All of the transformations
13/// return true if the MachineInstruction changed and false otherwise.
14///
15//===--------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
18#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
19
20#include "llvm/ADT/DenseMap.h"
25#include "llvm/IR/InstrTypes.h"
26#include <functional>
27
28namespace llvm {
29
30class GISelChangeObserver;
31class APInt;
32class ConstantFP;
33class GPtrAdd;
34class GZExtLoad;
35class MachineIRBuilder;
36class MachineInstrBuilder;
37class MachineRegisterInfo;
38class MachineInstr;
39class MachineOperand;
40class GISelKnownBits;
41class MachineDominatorTree;
42class LegalizerInfo;
43struct LegalityQuery;
44class RegisterBank;
45class RegisterBankInfo;
46class TargetLowering;
47class TargetRegisterInfo;
48
50 LLT Ty; // The result type of the extend.
51 unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
53};
54
59 bool RematOffset = false; // True if Offset is a constant that needs to be
60 // rematerialized before the new load/store.
61 bool IsPre = false;
62};
63
65 int64_t Imm;
68};
69
72 int64_t Imm;
73};
74
80};
81
82using BuildFnTy = std::function<void(MachineIRBuilder &)>;
83
85 SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
87 unsigned Opcode = 0; /// The opcode for the produced instruction.
88 OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
92};
93
95 /// Describes instructions to be built during a combine.
99 std::initializer_list<InstructionBuildSteps> InstrsToBuild)
101};
102
104protected:
114
115public:
117 bool IsPreLegalize,
118 GISelKnownBits *KB = nullptr,
119 MachineDominatorTree *MDT = nullptr,
120 const LegalizerInfo *LI = nullptr);
121
123 return KB;
124 }
125
127 return Builder;
128 }
129
130 const TargetLowering &getTargetLowering() const;
131
132 const MachineFunction &getMachineFunction() const;
133
134 const DataLayout &getDataLayout() const;
135
136 LLVMContext &getContext() const;
137
138 /// \returns true if the combiner is running pre-legalization.
139 bool isPreLegalize() const;
140
141 /// \returns true if \p Query is legal on the target.
142 bool isLegal(const LegalityQuery &Query) const;
143
144 /// \return true if the combine is running prior to legalization, or if \p
145 /// Query is legal on the target.
146 bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
147
148 /// \return true if the combine is running prior to legalization, or if \p Ty
149 /// is a legal integer constant type on the target.
150 bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;
151
152 /// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
153 void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
154
155 /// Replace a single register operand with a new register and inform the
156 /// observer of the changes.
158 Register ToReg) const;
159
160 /// Replace the opcode in instruction with a new opcode and inform the
161 /// observer of the changes.
162 void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;
163
164 /// Get the register bank of \p Reg.
165 /// If Reg has not been assigned a register, a register class,
166 /// or a register bank, then this returns nullptr.
167 ///
168 /// \pre Reg.isValid()
169 const RegisterBank *getRegBank(Register Reg) const;
170
171 /// Set the register bank of \p Reg.
172 /// Does nothing if the RegBank is null.
173 /// This is the counterpart to getRegBank.
174 void setRegBank(Register Reg, const RegisterBank *RegBank);
175
176 /// If \p MI is COPY, try to combine it.
177 /// Returns true if MI changed.
181
182 /// Returns true if \p DefMI precedes \p UseMI or they are the same
183 /// instruction. Both must be in the same basic block.
185
186 /// Returns true if \p DefMI dominates \p UseMI. By definition an
187 /// instruction dominates itself.
188 ///
189 /// If we haven't been provided with a MachineDominatorTree during
190 /// construction, this function returns a conservative result that tracks just
191 /// a single basic block.
192 bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);
193
194 /// If \p MI is extend that consumes the result of a load, try to combine it.
195 /// Returns true if MI changed.
199
200 /// Match (and (load x), mask) -> zextload x
202
203 /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
204 /// load.
206
209
212
213 /// Match sext_inreg(load p), imm -> sextload p
214 bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
215 void applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
216
217 /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
218 /// when their source operands are identical.
221
222 /// If a brcond's true block is not the fallthrough, make it so by inverting
223 /// the condition and swapping operands.
226
227 /// If \p MI is G_CONCAT_VECTORS, try to combine it.
228 /// Returns true if MI changed.
229 /// Right now, we support:
230 /// - concat_vector(undef, undef) => undef
231 /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
232 /// build_vector(A, B, C, D)
233 /// ==========================================================
234 /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
235 /// can be flattened into a build_vector.
236 /// In the first case \p Ops will be empty
237 /// In the second case \p Ops will contain the operands
238 /// needed to produce the flattened build_vector.
239 ///
240 /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
242 /// Replace \p MI with a flattened build_vector with \p Ops
243 /// or an implicit_def if \p Ops is empty.
245
247 /// Replace \p MI with a flattened build_vector with \p Ops
248 /// or an implicit_def if \p Ops is empty.
250
251 /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
252 /// Returns true if MI changed.
253 ///
254 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
256 /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
257 /// concat_vectors.
258 /// \p Ops will contain the operands needed to produce the flattened
259 /// concat_vectors.
260 ///
261 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
264 /// Replace \p MI with a concat_vectors with \p Ops.
266 const ArrayRef<Register> Ops);
269
270 /// Optimize memcpy intrinsics et al, e.g. constant len calls.
271 /// \p MaxLen if non-zero specifies the max length of a mem libcall to inline.
272 ///
273 /// For example (pre-indexed):
274 ///
275 /// $addr = G_PTR_ADD $base, $offset
276 /// [...]
277 /// $val = G_LOAD $addr
278 /// [...]
279 /// $whatever = COPY $addr
280 ///
281 /// -->
282 ///
283 /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
284 /// [...]
285 /// $whatever = COPY $addr
286 ///
287 /// or (post-indexed):
288 ///
289 /// G_STORE $val, $base
290 /// [...]
291 /// $addr = G_PTR_ADD $base, $offset
292 /// [...]
293 /// $whatever = COPY $addr
294 ///
295 /// -->
296 ///
297 /// $addr = G_INDEXED_STORE $val, $base, $offset
298 /// [...]
299 /// $whatever = COPY $addr
300 bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
301
304
305 /// Fold (shift (shift base, x), y) -> (shift base (x+y))
308
309 /// If we have a shift-by-constant of a bitwise logic op that itself has a
310 /// shift-by-constant operand with identical opcode, we may be able to convert
311 /// that into 2 independent shifts followed by the logic op.
313 ShiftOfShiftedLogic &MatchInfo);
315 ShiftOfShiftedLogic &MatchInfo);
316
317 bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo);
318
319 /// Transform a multiply by a power-of-2 value to a left shift.
320 bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
321 void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
322
323 // Transform a G_SHL with an extended source into a narrower shift if
324 // possible.
327 const RegisterImmPair &MatchData);
328
329 /// Fold away a merge of an unmerge of the corresponding values.
331
332 /// Reduce a shift by a constant to an unmerge and a shift on a half sized
333 /// type. This will not produce a shift smaller than \p TargetShiftSize.
334 bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
335 unsigned &ShiftVal);
336 void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
337 bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
338
339 /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
340 bool
343 void
346
347 /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
352
353 /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
354 bool
356 std::function<void(MachineIRBuilder &)> &MatchInfo);
357
358 /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
361
362 /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
365
366 /// Transform fp_instr(cst) to constant result of the fp operation.
368
369 /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
372
373 /// Transform PtrToInt(IntToPtr(x)) to x.
375
376 /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
377 /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
379 std::pair<Register, bool> &PtrRegAndCommute);
381 std::pair<Register, bool> &PtrRegAndCommute);
382
383 // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
386
387 /// Transform anyext(trunc(x)) to x.
389
390 /// Transform zext(trunc(x)) to x.
392
393 /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
395 std::tuple<Register, unsigned> &MatchInfo);
397 std::tuple<Register, unsigned> &MatchInfo);
398
399 /// Transform trunc (shl x, K) to shl (trunc x), K
400 /// if K < VT.getScalarSizeInBits().
401 ///
402 /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
403 /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
404 /// MidVT is obtained by finding a legal type between the trunc's src and dst
405 /// types.
407 std::pair<MachineInstr *, LLT> &MatchInfo);
409 std::pair<MachineInstr *, LLT> &MatchInfo);
410
411 /// Return true if any explicit use operand on \p MI is defined by a
412 /// G_IMPLICIT_DEF.
414
415 /// Return true if all register explicit use operands on \p MI are defined by
416 /// a G_IMPLICIT_DEF.
418
419 /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
421
422 /// Return true if a G_STORE instruction \p MI is storing an undef value.
424
425 /// Return true if a G_SELECT instruction \p MI has an undef comparison.
427
428 /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
430
431 /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
432 /// true, \p OpIdx will store the operand index of the known selected value.
433 bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
434
435 /// Replace an instruction with a G_FCONSTANT with value \p C.
437
438 /// Replace an instruction with an G_FCONSTANT with value \p CFP.
440
441 /// Replace an instruction with a G_CONSTANT with value \p C.
443
444 /// Replace an instruction with a G_CONSTANT with value \p C.
446
447 /// Replace an instruction with a G_IMPLICIT_DEF.
449
450 /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
451 void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
452
453 /// Delete \p MI and replace all of its uses with \p Replacement.
455
456 /// @brief Replaces the shift amount in \p MI with ShiftAmt % BW
457 /// @param MI
459
460 /// Return true if \p MOP1 and \p MOP2 are register operands defined by
461 /// equivalent instructions.
462 bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
463
464 /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value equal to
465 /// \p C.
466 bool matchConstantOp(const MachineOperand &MOP, int64_t C);
467
468 /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value exactly
469 /// equal to \p C.
470 bool matchConstantFPOp(const MachineOperand &MOP, double C);
471
472 /// @brief Checks if constant at \p ConstIdx is larger than \p MI 's bitwidth
473 /// @param ConstIdx Index of the constant
474 bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx);
475
476 /// Optimize (cond ? x : x) -> x
478
479 /// Optimize (x op x) -> x
481
482 /// Check if operand \p OpIdx is zero.
483 bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
484
485 /// Check if operand \p OpIdx is undef.
486 bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
487
488 /// Check if operand \p OpIdx is known to be a power of 2.
490
491 /// Erase \p MI
493
494 /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
496 std::tuple<Register, Register> &MatchInfo);
498 std::tuple<Register, Register> &MatchInfo);
499
500 /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
501 bool
503 InstructionStepsMatchInfo &MatchInfo);
504
505 /// Replace \p MI with a series of instructions described in \p MatchInfo.
507 InstructionStepsMatchInfo &MatchInfo);
508
509 /// Match ashr (shl x, C), C -> sext_inreg (C)
511 std::tuple<Register, int64_t> &MatchInfo);
513 std::tuple<Register, int64_t> &MatchInfo);
514
515 /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
517 BuildFnTy &MatchInfo);
518
519 /// \return true if \p MI is a G_AND instruction whose operands are x and y
520 /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
521 ///
522 /// \param [in] MI - The G_AND instruction.
523 /// \param [out] Replacement - A register the G_AND should be replaced with on
524 /// success.
525 bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
526
527 /// \return true if \p MI is a G_OR instruction whose operands are x and y
528 /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
529 /// value.)
530 ///
531 /// \param [in] MI - The G_OR instruction.
532 /// \param [out] Replacement - A register the G_OR should be replaced with on
533 /// success.
534 bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
535
536 /// \return true if \p MI is a G_SEXT_INREG that can be erased.
538
539 /// Combine inverting a result of a compare into the opposite cond code.
542
543 /// Fold (xor (and x, y), y) -> (and (not x), y)
544 ///{
546 std::pair<Register, Register> &MatchInfo);
548 std::pair<Register, Register> &MatchInfo);
549 ///}
550
551 /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
554
555 /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
557
558 /// Push a binary operator through a select on constants.
559 ///
560 /// binop (select cond, K0, K1), K2 ->
561 /// select cond, (binop K0, K2), (binop K1, K2)
562 bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
563 void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);
564
566 SmallVectorImpl<Register> &MatchInfo);
567
569 SmallVectorImpl<Register> &MatchInfo);
570
571 /// Match expression trees of the form
572 ///
573 /// \code
574 /// sN *a = ...
575 /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
576 /// \endcode
577 ///
578 /// And check if the tree can be replaced with an M-bit load + possibly a
579 /// bswap.
580 bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo);
581
584
587
590 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
593 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
594
595 /// Use a function which takes in a MachineIRBuilder to perform a combine.
596 /// By default, it erases the instruction \p MI from the function.
597 void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo);
598 /// Use a function which takes in a MachineIRBuilder to perform a combine.
599 /// This variant does not erase \p MI after calling the build function.
600 void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);
601
607
608 /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
609 /// or false constant based off of KnownBits information.
610 bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);
611
612 /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
613 /// KnownBits information.
614 bool
616 BuildFnTy &MatchInfo);
617
618 /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
620
622 BuildFnTy &MatchInfo);
623 /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
625
626 /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
628
629 /// Match: shr (and x, n), k -> ubfx x, pos, width
631
632 // Helpers for reassociation:
634 BuildFnTy &MatchInfo);
637 BuildFnTy &MatchInfo);
639 MachineInstr *RHS, BuildFnTy &MatchInfo);
640 /// Reassociate pointer calculations with G_ADD involved, to allow better
641 /// addressing mode usage.
642 bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo);
643
644 /// Try to reassociate operands of a commutative binop.
645 bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
646 Register Op1, BuildFnTy &MatchInfo);
647 /// Reassociate commutative binary operations like G_ADD.
649
650 /// Do constant folding when opportunities are exposed after MIR building.
651 bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo);
652
653 /// Do constant folding when opportunities are exposed after MIR building.
654 bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo);
655
656 /// Do constant FP folding when opportunities are exposed after MIR building.
658
659 /// Constant fold G_FMA/G_FMAD.
661
662 /// \returns true if it is possible to narrow the width of a scalar binop
663 /// feeding a G_AND instruction \p MI.
665
666 /// Given an G_UDIV \p MI expressing a divide by constant, return an
667 /// expression that implements it by multiplying by a magic number.
668 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
670 /// Combine G_UDIV by constant into a multiply by magic constant.
673
674 /// Given an G_SDIV \p MI expressing a signed divide by constant, return an
675 /// expression that implements it by multiplying by a magic number.
676 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
680
681 /// Given a G_SDIV \p MI expressing a signed divide by a pow2 constant,
682 /// return expressions that implement it by shifting.
683 bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
685 /// Given a G_UDIV \p MI expressing an unsigned divide by a pow2 constant,
686 /// return expressions that implement it by shifting.
688
689 // G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
692
693 /// Try to transform \p MI by using all of the above
694 /// combine functions. Returns true if changed.
696
697 /// Emit loads and stores that perform the given memcpy.
698 /// Assumes \p MI is a G_MEMCPY_INLINE
699 /// TODO: implement dynamically sized inline memcpy,
700 /// and rename: s/bool tryEmit/void emit/
702
703 /// Match:
704 /// (G_UMULO x, 2) -> (G_UADDO x, x)
705 /// (G_SMULO x, 2) -> (G_SADDO x, x)
706 bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);
707
708 /// Match:
709 /// (G_*MULO x, 0) -> 0 + no carry out
710 bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);
711
712 /// Match:
713 /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
714 /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
715 bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo);
716
717 /// Transform (fadd x, fneg(y)) -> (fsub x, y)
718 /// (fadd fneg(x), y) -> (fsub y, x)
719 /// (fsub x, fneg(y)) -> (fadd x, y)
720 /// (fmul fneg(x), fneg(y)) -> (fmul x, y)
721 /// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
722 /// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
723 /// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
725
726 bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo);
727 void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo);
728
729 bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
730 bool &HasFMAD, bool &Aggressive,
731 bool CanReassociate = false);
732
733 /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
734 /// (fadd (fmul x, y), z) -> (fmad x, y, z)
736
737 /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
738 /// (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
740 BuildFnTy &MatchInfo);
741
742 /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
743 /// (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
745 BuildFnTy &MatchInfo);
746
747 // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
748 // -> (fma x, y, (fma (fpext u), (fpext v), z))
749 // (fadd (fmad x, y, (fpext (fmul u, v))), z)
750 // -> (fmad x, y, (fmad (fpext u), (fpext v), z))
752 BuildFnTy &MatchInfo);
753
754 /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
755 /// (fsub (fmul x, y), z) -> (fmad x, y, -z)
757
758 /// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
759 /// (fsub (fneg (fmul, x, y)), z) -> (fmad (fneg x), y, (fneg z))
761 BuildFnTy &MatchInfo);
762
763 /// Transform (fsub (fpext (fmul x, y)), z)
764 /// -> (fma (fpext x), (fpext y), (fneg z))
765 /// (fsub (fpext (fmul x, y)), z)
766 /// -> (fmad (fpext x), (fpext y), (fneg z))
768 BuildFnTy &MatchInfo);
769
770 /// Transform (fsub (fpext (fneg (fmul x, y))), z)
771 /// -> (fneg (fma (fpext x), (fpext y), z))
772 /// (fsub (fpext (fneg (fmul x, y))), z)
773 /// -> (fneg (fmad (fpext x), (fpext y), z))
775 BuildFnTy &MatchInfo);
776
777 bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info);
778
779 /// Transform G_ADD(x, G_SUB(y, x)) to y.
780 /// Transform G_ADD(G_SUB(y, x), x) to y.
782
786
787 /// Transform:
788 /// (x + y) - y -> x
789 /// (x + y) - x -> y
790 /// x - (y + x) -> 0 - y
791 /// x - (x + z) -> 0 - z
792 bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo);
793
794 /// \returns true if it is possible to simplify a select instruction \p MI
795 /// to a min/max instruction of some sort.
797
798 /// Transform:
799 /// (X + Y) == X -> Y == 0
800 /// (X - Y) == X -> Y == 0
801 /// (X ^ Y) == X -> Y == 0
802 /// (X + Y) != X -> Y != 0
803 /// (X - Y) != X -> Y != 0
804 /// (X ^ Y) != X -> Y != 0
806
807 /// Match shifts greater or equal to the bitwidth of the operation.
809
810 /// Match constant LHS ops that should be commuted.
812
813 /// Combine sext of trunc.
814 bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);
815
816 /// Combine zext of trunc.
817 bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);
818
819 /// Combine zext nneg to sext.
820 bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo);
821
822 /// Match constant LHS FP ops that should be commuted.
824
825 // Given a binop \p MI, commute operands 1 and 2.
827
828 /// Combine select to integer min/max.
829 bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo);
830
831 /// Combine selects.
832 bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo);
833
834 /// Combine ands.
835 bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo);
836
837 /// Combine ors.
838 bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo);
839
840 /// Combine addos.
841 bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo);
842
843 /// Combine extract vector element.
845
846 /// Combine extract vector element with a build vector on the vector register.
848 BuildFnTy &MatchInfo);
849
850 /// Combine extract vector element with a build vector trunc on the vector
851 /// register.
853 BuildFnTy &MatchInfo);
854
855 /// Combine extract vector element with a shuffle vector on the vector
856 /// register.
858 BuildFnTy &MatchInfo);
859
860 /// Combine extract vector element with a insert vector element on the vector
861 /// register and different indices.
863 BuildFnTy &MatchInfo);
864 /// Use a function which takes in a MachineIRBuilder to perform a combine.
865 /// By default, it erases the instruction def'd on \p MO from the function.
866 void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo);
867
868 /// Match FPOWI if it's safe to extend it into a series of multiplications.
870
871 /// Expands FPOWI into a series of multiplications and a division if the
872 /// exponent is negative.
874
875 /// Combine insert vector element OOB.
877
879 BuildFnTy &MatchInfo);
880
881 bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
882
883 bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
884
885 bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
886
887 bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
888
889 /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
890 bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
891 BuildFnTy &MatchInfo);
892
893 bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
894 BuildFnTy &MatchInfo);
895 bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
896
897 bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo);
898
899 bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
900
901 bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
902
903 // fold ((A-C1)+C2) -> (A+(C2-C1))
904 bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
905
906private:
907 /// Checks for legality of an indexed variant of \p LdSt.
908 bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
909 /// Given a non-indexed load or store instruction \p MI, find an offset that
910 /// can be usefully and legally folded into it as a post-indexing operation.
911 ///
912 /// \returns true if a candidate is found.
913 bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
914 Register &Offset, bool &RematOffset);
915
916 /// Given a non-indexed load or store instruction \p MI, find an offset that
917 /// can be usefully and legally folded into it as a pre-indexing operation.
918 ///
919 /// \returns true if a candidate is found.
920 bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
922
923 /// Helper function for matchLoadOrCombine. Searches for Registers
924 /// which may have been produced by a load instruction + some arithmetic.
925 ///
926 /// \param [in] Root - The search root.
927 ///
928 /// \returns The Registers found during the search.
929 std::optional<SmallVector<Register, 8>>
930 findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
931
932 /// Helper function for matchLoadOrCombine.
933 ///
934 /// Checks if every register in \p RegsToVisit is defined by a load
935 /// instruction + some arithmetic.
936 ///
937 /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
938 /// at to the index of the load.
939 /// \param [in] MemSizeInBits - The number of bits each load should produce.
940 ///
941 /// \returns On success, a 3-tuple containing lowest-index load found, the
942 /// lowest index, and the last load in the sequence.
943 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
944 findLoadOffsetsForLoadOrCombine(
946 const SmallVector<Register, 8> &RegsToVisit,
947 const unsigned MemSizeInBits);
948
949 /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
950 /// a re-association of its operands would break an existing legal addressing
951 /// mode that the address computation currently represents.
952 bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd);
953
954 /// Behavior when a floating point min/max is given one NaN and one
955 /// non-NaN as input.
956 enum class SelectPatternNaNBehaviour {
957 NOT_APPLICABLE = 0, /// NaN behavior not applicable.
958 RETURNS_NAN, /// Given one NaN input, returns the NaN.
959 RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
960 RETURNS_ANY /// Given one NaN input, can return either (or both operands are
961 /// known non-NaN.)
962 };
963
964 /// \returns which of \p LHS and \p RHS would be the result of a non-equality
965 /// floating point comparison where one of \p LHS and \p RHS may be NaN.
966 ///
967 /// If both \p LHS and \p RHS may be NaN, returns
968 /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
969 SelectPatternNaNBehaviour
970 computeRetValAgainstNaN(Register LHS, Register RHS,
971 bool IsOrderedComparison) const;
972
973 /// Determines the floating point min/max opcode which should be used for
974 /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
975 ///
976 /// \returns 0 if this G_SELECT should not be combined to a floating point
977 /// min or max. If it should be combined, returns one of
978 ///
979 /// * G_FMAXNUM
980 /// * G_FMAXIMUM
981 /// * G_FMINNUM
982 /// * G_FMINIMUM
983 ///
984 /// Helper function for matchFPSelectToMinMax.
985 unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
986 SelectPatternNaNBehaviour VsNaNRetVal) const;
987
988 /// Handle floating point cases for matchSimplifySelectToMinMax.
989 ///
990 /// E.g.
991 ///
992 /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
993 /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
994 bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
995 Register FalseVal, BuildFnTy &MatchInfo);
996
997 /// Try to fold selects to logical operations.
998 bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo);
999
1000 bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo);
1001
1002 bool isOneOrOneSplat(Register Src, bool AllowUndefs);
1003 bool isZeroOrZeroSplat(Register Src, bool AllowUndefs);
1004 bool isConstantSplatVector(Register Src, int64_t SplatValue,
1005 bool AllowUndefs);
1006 bool isConstantOrConstantVectorI(Register Src) const;
1007
1008 std::optional<APInt> getConstantOrConstantSplatVector(Register Src);
1009
1010 /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
1011 /// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
1012 /// into a single comparison using range-based reasoning.
1013 bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
1014 BuildFnTy &MatchInfo);
1015
1016 // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
1017 bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo);
1018
1019 bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;
1020};
1021} // namespace llvm
1022
1023#endif
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
amdgpu AMDGPU Register Bank Select
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file defines the DenseMap class.
uint64_t Addr
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
Implement a low-level type suitable for MachineInstr level instruction selection.
mir Rename Register Operands
unsigned Reg
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
void applyUDivByConst(MachineInstr &MI)
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal)
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops)
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
bool matchPtrAddZero(MachineInstr &MI)
}
bool matchAllExplicitUsesAreUndef(MachineInstr &MI)
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx)
Delete MI and replace all of its uses with its OpIdx-th operand.
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo)
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
bool matchUDivByConst(MachineInstr &MI)
Combine G_UDIV by constant into a multiply by magic constant.
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg)
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI)
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
bool matchShiftsTooBig(MachineInstr &MI)
Match shifts greater or equal to the bitwidth of the operation.
bool tryCombineCopy(MachineInstr &MI)
If MI is COPY, try to combine it.
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo)
bool matchUndefStore(MachineInstr &MI)
Return true if a G_STORE instruction MI is storing an undef value.
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform: (X + Y) == X -> Y == 0 (X - Y) == X -> Y == 0 (X ^ Y) == X -> Y == 0 (X + Y) !...
bool matchRedundantSExtInReg(MachineInstr &MI)
bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine sext of trunc.
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo)
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent)
Match FPOWI if it's safe to extend it into a series of multiplications.
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform: (x + y) - y -> x (x + y) - x -> y x - (y + x) -> 0 - y x - (x + z) -> 0 - z.
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo)
Do constant FP folding when opportunities are exposed after MIR building.
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal)
void applyCombineUnmergeZExtToZExt(MachineInstr &MI)
void applyCommuteBinOpOperands(MachineInstr &MI)
bool matchBinOpSameVal(MachineInstr &MI)
Optimize (x op x) -> x.
bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo)
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts)
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) (fsub (fneg (fmul,...
bool matchCombineCopy(MachineInstr &MI)
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx)
Return true if a G_SELECT instruction MI has a constant comparison.
void eraseInst(MachineInstr &MI)
Erase MI.
bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo)
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine extract vector element with a insert vector element on the vector register and different indi...
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo)
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops)
If MI is G_CONCAT_VECTORS, try to combine it.
bool matchAddSubSameReg(MachineInstr &MI, Register &Src)
Transform G_ADD(x, G_SUB(y, x)) to y.
void applyRotateOutOfRange(MachineInstr &MI)
const DataLayout & getDataLayout() const
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchRotateOutOfRange(MachineInstr &MI)
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst)
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo)
bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine extract vector element with a build vector trunc on the vector register.
void applyCombineShuffleVector(MachineInstr &MI, const ArrayRef< Register > Ops)
Replace MI with a concat_vectors with Ops.
const TargetLowering & getTargetLowering() const
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo)
Use a function which takes in a MachineIRBuilder to perform a combine.
void applyPtrAddZero(MachineInstr &MI)
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo)
void setRegBank(Register Reg, const RegisterBank *RegBank)
Set the register bank of Reg.
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement)
void replaceInstWithConstant(MachineInstr &MI, int64_t C)
Replace an instruction with a G_CONSTANT with value C.
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo)
Match ashr (shl x, C), C -> sext_inreg (C)
bool tryCombineExtendingLoads(MachineInstr &MI)
If MI is extend that consumes the result of a load, try to combine it.
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount)
bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo)
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo)
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
GISelKnownBits * getKnownBits() const
void applySDivByConst(MachineInstr &MI)
bool matchUndefSelectCmp(MachineInstr &MI)
Return true if a G_SELECT instruction MI has an undef comparison.
void replaceInstWithUndef(MachineInstr &MI)
Replace an instruction with a G_IMPLICIT_DEF.
bool matchRedundantOr(MachineInstr &MI, Register &Replacement)
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx)
Check if operand OpIdx is undef.
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo)
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst)
void replaceInstWithFConstant(MachineInstr &MI, double C)
Replace an instruction with a G_FCONSTANT with value C.
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo)
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2)
Return true if MOP1 and MOP2 are register operands are defined by equivalent instructions.
bool tryCombine(MachineInstr &MI)
Try to transform MI by using all of the above combine functions.
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo)
Fold (shift (shift base, x), y) -> (shift base (x+y))
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo)
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo)
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond)
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: (G_*MULO x, 0) -> 0 + no carry out.
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement)
Delete MI and replace all of its uses with Replacement.
bool matchFunnelShiftToRotate(MachineInstr &MI)
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate)
Combine inverting a result of a compare into the opposite cond code.
void applyCombineExtOfExt(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx)
Check if operand OpIdx is known to be a power of 2.
bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo)
void applyCombineCopy(MachineInstr &MI)
bool matchAnyExplicitUseIsUndef(MachineInstr &MI)
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo)
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute)
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops)
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
bool matchSextTruncSextLoad(MachineInstr &MI)
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo)
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine insert vector element OOB.
GISelKnownBits * KB
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo)
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo)
MachineInstr * buildSDivUsingMul(MachineInstr &MI)
Given an G_SDIV MI expressing a signed divide by constant, return an expression that implements it by...
void applySDivByPow2(MachineInstr &MI)
void applyFunnelShiftConstantModulo(MachineInstr &MI)
Replaces the shift amount in MI with ShiftAmt % BW.
bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo)
Do constant folding when opportunities are exposed after MIR building.
bool isPreLegalize() const
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo)
Match (and (load x), mask) -> zextload x.
bool matchConstantOp(const MachineOperand &MOP, int64_t C)
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine ands.
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg)
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate)
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo)
bool matchConstantFPOp(const MachineOperand &MOP, double C)
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo)
Return true if MI is a G_ADD which can be simplified to a G_SUB.
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0)
Optimize memcpy intrinsics et al, e.g.
bool matchSelectSameVal(MachineInstr &MI)
Optimize (cond ? x : x) -> x.
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst)
Transform fp_instr(cst) to constant result of the fp operation.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo)
LLVMContext & getContext() const
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo)
Try to reassociate to reassociate operands of a commutative binop.
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
bool tryEmitMemcpyInline(MachineInstr &MI)
Emit loads and stores that perform the given memcpy.
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo)
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo)
Fold (xor (and x, y), y) -> (and (not x), y) {.
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info)
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData)
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo)
Constant fold G_FMA/G_FMAD.
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo)
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent)
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
bool isLegal(const LegalityQuery &Query) const
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine selects.
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts)
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo)
bool matchExtractVectorElementWithShuffleVector(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine extract vector element with a shuffle vector on the vector register.
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo)
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg)
Transform anyext(trunc(x)) to x.
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI, BuildFnTy &MatchInfo)
void applySimplifyURemByPow2(MachineInstr &MI)
Combine G_UREM x, (known power of 2) to an add and bitmasking.
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo)
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops)
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
MachineRegisterInfo & MRI
void applyUMulHToLShr(MachineInstr &MI)
bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo)
Match expression trees of the form.
bool matchShuffleToExtract(MachineInstr &MI)
bool matchUndefShuffleVectorMask(MachineInstr &MI)
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI)
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal)
Transform a multiply by a power-of-2 value to a left shift.
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo)
SelectOperand is the operand in binary operator MI that is the select to fold.
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo)
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo)
Fold away a merge of an unmerge of the corresponding values.
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo)
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI)
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI)
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx)
Checks if constant at ConstIdx is larger than MI 's bitwidth.
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI)
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo)
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI)
Returns true if DefMI precedes UseMI or they are the same instruction.
bool matchDivByPow2(MachineInstr &MI, bool IsSigned)
Given an G_SDIV MI expressing a signed divided by a pow2 constant, return expressions that implements...
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg)
bool matchUMulHToLShr(MachineInstr &MI)
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI)
Returns true if DefMI dominates UseMI.
bool matchExtractVectorElementWithBuildVector(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine extract vector element with a build vector on the vector register.
MachineInstr * buildUDivUsingMul(MachineInstr &MI)
Given an G_UDIV MI expressing a divide by constant, return an expression that implements it by multip...
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg)
Transform zext(trunc(x)) to x.
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData)
bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine zext nneg to sext.
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false)
const LegalizerInfo * LI
bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine zext of trunc.
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI)
void applyShuffleToExtract(MachineInstr &MI)
MachineDominatorTree * MDT
bool matchSDivByConst(MachineInstr &MI)
bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI, BuildFnTy &MatchInfo)
Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
MachineIRBuilder & getBuilder() const
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands)
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo)
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo)
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo)
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
const RegisterBankInfo * RBI
bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine extract vector element.
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo)
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI)
const TargetRegisterInfo * TRI
bool tryCombineShuffleVector(MachineInstr &MI)
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg)
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo)
GISelChangeObserver & Observer
bool matchCombineExtOfExt(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
Transform [asz]ext([asz]ext(x)) to [asz]ext x.
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
Match sext_inreg(load p), imm -> sextload p.
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo)
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute)
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine ors.
void applyFunnelShiftToRotate(MachineInstr &MI)
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands)
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond)
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine addos.
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg)
Transform PtrToInt(IntToPtr(x)) to x.
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal)
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
bool matchCommuteConstantToRHS(MachineInstr &MI)
Match constant LHS ops that should be commuted.
bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo)
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo)
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI)
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo)
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo)
Replace MI with a series of instructions described in MatchInfo.
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
MachineIRBuilder & Builder
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine select to integer min/max.
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: shr (and x, n), k -> ubfx x, pos, width.
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops)
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo)
Reassociate commutative binary operations like G_ADD.
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo)
Push a binary operator through a select on constants.
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo)
Do constant folding when opportunities are exposed after MIR building.
const MachineFunction & getMachineFunction() const
bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx)
Check if operand OpIdx is zero.
bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo)
void applyUDivByPow2(MachineInstr &MI)
Given an G_UDIV MI expressing an unsigned divided by a pow2 constant, return expressions that impleme...
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo)
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo)
void applySextTruncSextLoad(MachineInstr &MI)
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo)
bool matchCommuteFPConstantToRHS(MachineInstr &MI)
Match constant LHS FP ops that should be commuted.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:103
Abstract class that contains various methods for clients to notify about changes.
Represents any type of generic load or store.
Represents a logical binary operation.
Represents a G_PTR_ADD.
Represents a G_SELECT.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Helper class to build MachineInstr.
Representation of each machine instruction.
Definition: MachineInstr.h:69
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Holds all the information related to register banks.
This class implements the register bank concept.
Definition: RegisterBank.h:28
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:587
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ConstantFP
Definition: ISDOpcodes.h:77
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
std::function< void(MachineIRBuilder &)> BuildFnTy
InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
InstructionBuildSteps()=default
Operands to be added to the instruction.
OperandBuildSteps OperandFns
The opcode for the produced instruction.
InstructionStepsMatchInfo(std::initializer_list< InstructionBuildSteps > InstrsToBuild)
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
MachineInstr * MI
const RegisterBank * Bank