LLVM 20.0.0git
AArch64ISelLowering.cpp File Reference
#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SipHash.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "AArch64GenAsmMatcher.inc"


Classes

struct  GenericSetCCInfo
 Helper structure to keep track of ISD::SET_CC operands.
 
struct  AArch64SetCCInfo
 Helper structure to keep track of a SET_CC lowered into AArch64 code.
 
union  SetCCInfo
 Helper structure to keep track of SetCC information.
 
struct  SetCCInfoAndKind
 Helper structure to be able to read SetCC information.
 

Macros

#define DEBUG_TYPE   "aarch64-lower"
 
#define LCALLNAMES(A, B, N)
 
#define LCALLNAME4(A, B)
 
#define LCALLNAME5(A, B)
 
#define MAKE_CASE(V)
 
#define GET_REGISTER_MATCHER
 

Enumerations

enum class  PredicateConstraint { Uph , Upl , Upa }
 
enum class  ReducedGprConstraint { Uci , Ucj }
 

Functions

 STATISTIC (NumTailCalls, "Number of tail calls")
 
 STATISTIC (NumShiftInserts, "Number of vector shift inserts")
 
 STATISTIC (NumOptimizedImms, "Number of times immediates were optimized")
 
static EVT getPackedSVEVectorVT (EVT VT)
 
static EVT getPackedSVEVectorVT (ElementCount EC)
 
static EVT getPromotedVTForPredicate (EVT VT)
 
static bool isPackedVectorType (EVT VT, SelectionDAG &DAG)
 Returns true if VT's elements occupy the lowest bit positions of its associated register class without any intervening space.
 
static bool isMergePassthruOpcode (unsigned Opc)
 
static bool isZeroingInactiveLanes (SDValue Op)
 
static std::tuple< SDValue, SDValue > extractPtrauthBlendDiscriminators (SDValue Disc, SelectionDAG *DAG)
 
static bool isIntImmediate (const SDNode *N, uint64_t &Imm)
 
static bool isOpcWithIntImmediate (const SDNode *N, unsigned Opc, uint64_t &Imm)
 
static bool optimizeLogicalImm (SDValue Op, unsigned Size, uint64_t Imm, const APInt &Demanded, TargetLowering::TargetLoweringOpt &TLO, unsigned NewOpc)
 
static EVT getContainerForFixedLengthVector (SelectionDAG &DAG, EVT VT)
 
static SDValue convertToScalableVector (SelectionDAG &DAG, EVT VT, SDValue V)
 
static SDValue convertFromScalableVector (SelectionDAG &DAG, EVT VT, SDValue V)
 
static SDValue convertFixedMaskToScalableVector (SDValue Mask, SelectionDAG &DAG)
 
static SDValue getPredicateForVector (SelectionDAG &DAG, SDLoc &DL, EVT VT)
 
static SDValue getPredicateForScalableVector (SelectionDAG &DAG, SDLoc &DL, EVT VT)
 
static bool isZerosVector (const SDNode *N)
 isZerosVector - Check whether SDNode N is a zero-filled vector.
 
static AArch64CC::CondCode changeIntCCToAArch64CC (ISD::CondCode CC)
 changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 CC
 
static void changeFPCCToAArch64CC (ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
 changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
 
static void changeFPCCToANDAArch64CC (ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
 Convert a DAG fp condition code to an AArch64 CC.
 
static void changeVectorFPCCToAArch64CC (ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
 changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector instructions.
 
static bool isLegalArithImmed (uint64_t C)
 
static bool cannotBeIntMin (SDValue CheckedVal, SelectionDAG &DAG)
 
static bool isCMN (SDValue Op, ISD::CondCode CC, SelectionDAG &DAG)
 
static SDValue emitStrictFPComparison (SDValue LHS, SDValue RHS, const SDLoc &dl, SelectionDAG &DAG, SDValue Chain, bool IsSignaling)
 
static SDValue emitComparison (SDValue LHS, SDValue RHS, ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG)
 
static unsigned getCmpOperandFoldingProfit (SDValue Op)
 Returns how profitable it is to fold a comparison's operand's shift and/or extension operations.
 
static SDValue getAArch64Cmp (SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, const SDLoc &dl)
 
static std::pair< SDValue, SDValue > getAArch64XALUOOp (AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG)
 
static SDValue valueToCarryFlag (SDValue Value, SelectionDAG &DAG, bool Invert)
 
static SDValue carryFlagToValue (SDValue Glue, EVT VT, SelectionDAG &DAG, bool Invert)
 
static SDValue overflowFlagToValue (SDValue Glue, EVT VT, SelectionDAG &DAG)
 
static SDValue lowerADDSUBO_CARRY (SDValue Op, SelectionDAG &DAG, unsigned Opcode, bool IsSigned)
 
static SDValue LowerXALUO (SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerPREFETCH (SDValue Op, SelectionDAG &DAG)
 
static MVT getSVEContainerType (EVT ContentTy)
 
static EVT getExtensionTo64Bits (const EVT &OrigVT)
 
static SDValue addRequiredExtensionForVectorMULL (SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
 
static std::optional< uint64_t > getConstantLaneNumOfExtractHalfOperand (SDValue &Op)
 
static bool isExtendedBUILD_VECTOR (SDValue N, SelectionDAG &DAG, bool isSigned)
 
static SDValue skipExtensionForVectorMULL (SDValue N, SelectionDAG &DAG)
 
static bool isSignExtended (SDValue N, SelectionDAG &DAG)
 
static bool isZeroExtended (SDValue N, SelectionDAG &DAG)
 
static bool isAddSubSExt (SDValue N, SelectionDAG &DAG)
 
static bool isAddSubZExt (SDValue N, SelectionDAG &DAG)
 
static unsigned selectUmullSmull (SDValue &N0, SDValue &N1, SelectionDAG &DAG, SDLoc DL, bool &IsMLA)
 
static SDValue getPTrue (SelectionDAG &DAG, SDLoc DL, EVT VT, int Pattern)
 
static SDValue optimizeIncrementingWhile (SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsEqual)
 
static SDValue getSVEPredicateBitCast (EVT VT, SDValue Op, SelectionDAG &DAG)
 
SDValue LowerSMELdrStr (SDValue N, SelectionDAG &DAG, bool IsLoad)
 
unsigned getGatherVecOpcode (bool IsScaled, bool IsSigned, bool NeedsExtend)
 
unsigned getSignExtendedGatherOpcode (unsigned Opcode)
 
static SDValue LowerTruncateVectorStore (SDLoc DL, StoreSDNode *ST, EVT VT, EVT MemVT, SelectionDAG &DAG)
 
static SDValue LowerBRCOND (SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerFunnelShift (SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerFLDEXP (SDValue Op, SelectionDAG &DAG)
 
static unsigned getIntrinsicID (const SDNode *N)
 
static bool isPassedInFPR (EVT VT)
 
static bool canGuaranteeTCO (CallingConv::ID CC, bool GuaranteeTailCalls)
 Return true if the calling convention is one that we can guarantee TCO for.
 
static bool mayTailCallThisCC (CallingConv::ID CC)
 Return true if we might ever do TCO for calls with this calling convention.
 
static bool callConvSupportsVarArgs (CallingConv::ID CC)
 Return true if the calling convention supports varargs. Currently only those that pass varargs like the C calling convention does are eligible. Calling conventions listed in this function must also be properly handled in AArch64Subtarget::isCallingConvWin64.
 
static void analyzeCallOperands (const AArch64TargetLowering &TLI, const AArch64Subtarget *Subtarget, const TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo)
 
static bool checkZExtBool (SDValue Arg, const SelectionDAG &DAG)
 
static unsigned getSMCondition (const SMEAttrs &CallerAttrs, const SMEAttrs &CalleeAttrs)
 
std::pair< SDValue, uint64_t > lookThroughSignExtension (SDValue Val)
 
static bool isOrXorChain (SDValue N, unsigned &Num, SmallVector< std::pair< SDValue, SDValue >, 16 > &WorkList)
 
static SDValue performOrXorChainCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue getEstimate (const AArch64Subtarget *ST, unsigned Opcode, SDValue Operand, SelectionDAG &DAG, int &ExtraSteps)
 
static std::optional< PredicateConstraint > parsePredicateConstraint (StringRef Constraint)
 
static const TargetRegisterClass * getPredicateRegisterClass (PredicateConstraint Constraint, EVT VT)
 
static std::optional< ReducedGprConstraint > parseReducedGprConstraint (StringRef Constraint)
 
static const TargetRegisterClass * getReducedGprRegisterClass (ReducedGprConstraint Constraint, EVT VT)
 
static AArch64CC::CondCode parseConstraintCode (llvm::StringRef Constraint)
 
static SDValue getSETCC (AArch64CC::CondCode CC, SDValue NZCV, const SDLoc &DL, SelectionDAG &DAG)
 Helper function to create 'CSET', which is equivalent to 'CSINC <Wd>, WZR, WZR, invert(<cond>)'.
 
static SDValue WidenVector (SDValue V64Reg, SelectionDAG &DAG)
 WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 register class.
 
static unsigned getExtFactor (SDValue &V)
 getExtFactor - Determine the adjustment factor for the position when generating an "extract from vector registers" instruction.
 
SDValue ReconstructShuffleWithRuntimeMask (SDValue Op, SelectionDAG &DAG)
 
static bool isSingletonEXTMask (ArrayRef< int > M, EVT VT, unsigned &Imm)
 
static SDValue ReconstructTruncateFromBuildVector (SDValue V, SelectionDAG &DAG)
 
static bool isWideDUPMask (ArrayRef< int > M, EVT VT, unsigned BlockSize, unsigned &DupLaneOp)
 Check if a vector shuffle corresponds to a DUP instruction with a larger element width than the vector lane type.
 
static bool isEXTMask (ArrayRef< int > M, EVT VT, bool &ReverseEXT, unsigned &Imm)
 
static bool isZIP_v_undef_Mask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
 
static bool isUZP_v_undef_Mask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
 
static bool isTRN_v_undef_Mask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
 
static bool isINSMask (ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
 
static bool isConcatMask (ArrayRef< int > Mask, EVT VT, bool SplitLHS)
 
static SDValue tryFormConcatFromShuffle (SDValue Op, SelectionDAG &DAG)
 
static SDValue GeneratePerfectShuffle (unsigned ID, SDValue V1, SDValue V2, unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
 GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations to build the shuffle.
 
static SDValue GenerateTBL (SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
 
static unsigned getDUPLANEOp (EVT EltType)
 
static SDValue constructDup (SDValue V, int Lane, SDLoc dl, EVT VT, unsigned Opcode, SelectionDAG &DAG)
 
static bool isWideTypeMask (ArrayRef< int > M, EVT VT, SmallVectorImpl< int > &NewMask)
 
static SDValue tryWidenMaskForShuffle (SDValue Op, SelectionDAG &DAG)
 
static SDValue tryToConvertShuffleOfTbl2ToTbl4 (SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
 
static bool resolveBuildVector (BuildVectorSDNode *BVN, APInt &CnstBits, APInt &UndefBits)
 
static SDValue tryAdvSIMDModImm64 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static SDValue tryAdvSIMDModImm32 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
 
static SDValue tryAdvSIMDModImm16 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
 
static SDValue tryAdvSIMDModImm321s (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static SDValue tryAdvSIMDModImm8 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static SDValue tryAdvSIMDModImmFP (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static bool isAllConstantBuildVector (const SDValue &PotentialBVec, uint64_t &ConstVal)
 
static bool isAllInactivePredicate (SDValue N)
 
static bool isAllActivePredicate (SelectionDAG &DAG, SDValue N)
 
static SDValue tryLowerToSLI (SDNode *N, SelectionDAG &DAG)
 
SDValue tryWhileWRFromOR (SDValue Op, SelectionDAG &DAG, const AArch64Subtarget &Subtarget)
 Try to lower the construction of a pointer alias mask to a WHILEWR.
 
static SDValue NormalizeBuildVector (SDValue Op, SelectionDAG &DAG)
 
static SDValue ConstantBuildVector (SDValue Op, SelectionDAG &DAG, const AArch64Subtarget *ST)
 
static bool isPow2Splat (SDValue Op, uint64_t &SplatVal, bool &Negated)
 
static bool getVShiftImm (SDValue Op, unsigned ElementBits, int64_t &Cnt)
 getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift operation, where all the elements of the build_vector must have the same constant integer value.
 
static bool isVShiftLImm (SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
 isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left operation.
 
static bool isVShiftRImm (SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
 isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation.
 
static bool canLowerSRLToRoundingShiftForVT (SDValue Shift, EVT ResVT, SelectionDAG &DAG, unsigned &ShiftValue, SDValue &RShOperand)
 
static SDValue EmitVectorComparison (SDValue LHS, SDValue RHS, AArch64CC::CondCode CC, bool NoNans, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
 
static SDValue getReductionSDNode (unsigned Op, SDLoc DL, SDValue ScalarOp, SelectionDAG &DAG)
 
static SDValue getVectorBitwiseReduce (unsigned Opcode, SDValue Vec, EVT VT, SDLoc DL, SelectionDAG &DAG)
 
template<unsigned NumVecs>
static bool setInfoSVEStN (const AArch64TargetLowering &TLI, const DataLayout &DL, AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI)
 Set the IntrinsicInfo for the aarch64_sve_st<N> intrinsics.
 
static bool isSplatShuffle (Value *V)
 
static bool areExtractShuffleVectors (Value *Op1, Value *Op2, bool AllowSplat=false)
 Check if both Op1 and Op2 are shufflevector extracts of either the lower or upper half of the vector elements.
 
static bool areExtractExts (Value *Ext1, Value *Ext2)
 Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.
 
static bool isOperandOfVmullHighP64 (Value *Op)
 Check if Op could be used with vmull_high_p64 intrinsic.
 
static bool areOperandsOfVmullHighP64 (Value *Op1, Value *Op2)
 Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
 
static bool shouldSinkVectorOfPtrs (Value *Ptrs, SmallVectorImpl< Use * > &Ops)
 
static bool shouldSinkVScale (Value *Op, SmallVectorImpl< Use * > &Ops)
 We want to sink the following cases: (add|sub|gep) A, ((mul|shl) vscale, imm); (add|sub|gep) A, vscale; (add|sub|gep) A, ((mul|shl) zext(vscale), imm).
 
static bool createTblShuffleMask (unsigned SrcWidth, unsigned DstWidth, unsigned NumElts, bool IsLittleEndian, SmallVectorImpl< int > &Mask)
 
static Value * createTblShuffleForZExt (IRBuilderBase &Builder, Value *Op, FixedVectorType *ZExtTy, FixedVectorType *DstTy, bool IsLittleEndian)
 
static Value * createTblShuffleForSExt (IRBuilderBase &Builder, Value *Op, FixedVectorType *DstTy, bool IsLittleEndian)
 
static void createTblForTrunc (TruncInst *TI, bool IsLittleEndian)
 
static ScalableVectorType * getSVEContainerIRType (FixedVectorType *VTy)
 
static Function * getStructuredLoadFunction (Module *M, unsigned Factor, bool Scalable, Type *LDVTy, Type *PtrTy)
 
static Function * getStructuredStoreFunction (Module *M, unsigned Factor, bool Scalable, Type *STVTy, Type *PtrTy)
 
template<typename Iter >
bool hasNearbyPairedStore (Iter It, Iter End, Value *Ptr, const DataLayout &DL)
 
bool getDeinterleave2Values (Value *DI, SmallVectorImpl< Instruction * > &DeinterleavedValues, SmallVectorImpl< Instruction * > &DeInterleaveDeadInsts)
 
bool getDeinterleave4Values (Value *DI, SmallVectorImpl< Instruction * > &DeinterleavedValues, SmallVectorImpl< Instruction * > &DeInterleaveDeadInsts)
 
bool getDeinterleavedValues (Value *DI, SmallVectorImpl< Instruction * > &DeinterleavedValues, SmallVectorImpl< Instruction * > &DeInterleaveDeadInsts)
 
bool getValuesToInterleave (Value *II, SmallVectorImpl< Value * > &InterleavedValues, SmallVectorImpl< Instruction * > &InterleaveDeadInsts)
 
static SDValue foldVectorXorShiftIntoCmp (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 Turn vector tests of the signbit in the form of: xor (sra X, elt_size(X)-1), -1 into: cmge X, X, #0.
 
static SDValue performVecReduceAddCombineWithUADDLP (SDNode *N, SelectionDAG &DAG)
 
static SDValue performVecReduceAddCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *ST)
 
static SDValue performUADDVAddCombine (SDValue A, SelectionDAG &DAG)
 
static SDValue performUADDVZextCombine (SDValue A, SelectionDAG &DAG)
 
static SDValue performUADDVCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performXorCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static std::optional< unsigned > IsSVECntIntrinsic (SDValue S)
 
static EVT calculatePreExtendType (SDValue Extend)
 Calculates what the pre-extend type is, based on the extension operation node provided by Extend.
 
static SDValue performBuildShuffleExtendCombine (SDValue BV, SelectionDAG &DAG)
 Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector SExt/ZExt rather than the scalar SExt/ZExt.
 
static SDValue performMulVectorExtendCombine (SDNode *Mul, SelectionDAG &DAG)
 Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup)) making use of the vector SExt/ZExt rather than the scalar SExt/ZExt.
 
static SDValue performMulVectorCmpZeroCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performVectorExtCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performMulCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performVectorCompareAndMaskUnaryOpCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performIntToFpCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performFpToIntCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 Fold a floating-point multiply by power of two into floating-point to fixed-point conversion.
 
static SDValue tryCombineToBSL (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64TargetLowering &TLI)
 
static SDValue performANDORCSELCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performORCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
 
static bool isConstantSplatVectorMaskForType (SDNode *N, EVT MemVT)
 
static SDValue performReinterpretCastCombine (SDNode *N)
 
static SDValue performSVEAndCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performANDSETCCCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performANDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performFADDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static bool hasPairwiseAdd (unsigned Opcode, EVT VT, bool FullFP16)
 
static SDValue getPTest (SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op, AArch64CC::CondCode Cond)
 
static bool isPredicateCCSettingOp (SDValue N)
 
static SDValue performFirstTrueTestVectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performLastTrueTestVectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performExtractVectorEltCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performConcatVectorsCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performExtractSubvectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performInsertSubvectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryCombineFixedPointConvert (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryExtendDUPToExtractHigh (SDValue N, SelectionDAG &DAG)
 
static bool isEssentiallyExtractHighSubvector (SDValue N)
 
static bool isSetCC (SDValue Op, SetCCInfoAndKind &SetCCInfo)
 Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one.
 
static bool isSetCCOrZExtSetCC (const SDValue &Op, SetCCInfoAndKind &Info)
 
static SDValue performSetccAddFolding (SDNode *Op, SelectionDAG &DAG)
 
static SDValue performAddUADDVCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performAddCSelIntoCSinc (SDNode *N, SelectionDAG &DAG)
 Perform the scalar expression combine in the form of: CSEL(c, 1, cc) + b => CSINC(b+c, b, cc) CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
 
static SDValue performAddDotCombine (SDNode *N, SelectionDAG &DAG)
 
static bool isNegatedInteger (SDValue Op)
 
static SDValue getNegatedInteger (SDValue Op, SelectionDAG &DAG)
 
static SDValue performNegCSelCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performAddSubLongCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static bool isCMP (SDValue Op)
 
static std::optional< AArch64CC::CondCode > getCSETCondCode (SDValue Op)
 
static SDValue foldOverflowCheck (SDNode *Op, SelectionDAG &DAG, bool IsAdd)
 
static SDValue foldADCToCINC (SDNode *N, SelectionDAG &DAG)
 
static SDValue performBuildVectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performTruncateCombine (SDNode *N, SelectionDAG &DAG)
 
static bool isExtendOrShiftOperand (SDValue N)
 
static SDValue performAddCombineSubShift (SDNode *N, SDValue SUB, SDValue Z, SelectionDAG &DAG)
 
static SDValue performAddCombineForShiftedOperands (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSubAddMULCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSVEMulAddSubCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performAddSubIntoVectorOp (SDNode *N, SelectionDAG &DAG)
 
static bool isLoadOrMultipleLoads (SDValue B, SmallVector< LoadSDNode * > &Loads)
 
static bool areLoadedOffsetButOtherwiseSame (SDValue Op0, SDValue Op1, SelectionDAG &DAG, unsigned &NumSubLoads)
 
static SDValue performExtBinopLoadFold (SDNode *N, SelectionDAG &DAG)
 
static SDValue performAddSubCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue tryCombineLongOpWithDup (unsigned IID, SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryCombineShiftImm (unsigned IID, SDNode *N, SelectionDAG &DAG)
 
static SDValue tryCombineCRC32 (unsigned Mask, SDNode *N, SelectionDAG &DAG)
 
static SDValue combineAcrossLanesIntrinsic (unsigned Opc, SDNode *N, SelectionDAG &DAG)
 
static SDValue LowerSVEIntrinsicIndex (SDNode *N, SelectionDAG &DAG)
 
static SDValue LowerSVEIntrinsicDUP (SDNode *N, SelectionDAG &DAG)
 
static SDValue LowerSVEIntrinsicEXT (SDNode *N, SelectionDAG &DAG)
 
static SDValue tryConvertSVEWideCompare (SDNode *N, ISD::CondCode CC, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue combineSVEReductionInt (SDNode *N, unsigned Opc, SelectionDAG &DAG)
 
static SDValue combineSVEReductionFP (SDNode *N, unsigned Opc, SelectionDAG &DAG)
 
static SDValue combineSVEReductionOrderedFP (SDNode *N, unsigned Opc, SelectionDAG &DAG)
 
static SDValue convertMergedOpToPredOp (SDNode *N, unsigned Opc, SelectionDAG &DAG, bool UnpredOp=false, bool SwapOperands=false)
 
static SDValue tryCombineWhileLo (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performIntrinsicCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static bool isCheapToExtend (const SDValue &N)
 
static SDValue performSignExtendSetCCCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performExtendCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue splitStoreSplat (SelectionDAG &DAG, StoreSDNode &St, SDValue SplatVal, unsigned NumVecElts)
 
static SDValue performLD1Combine (SDNode *N, SelectionDAG &DAG, unsigned Opc)
 
static SDValue performLDNT1Combine (SDNode *N, SelectionDAG &DAG)
 
template<unsigned Opcode>
static SDValue performLD1ReplicateCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performST1Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSTNT1Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue replaceZeroVectorStore (SelectionDAG &DAG, StoreSDNode &St)
 Replace a splat of zeros to a vector store by scalar stores of WZR/XZR.
 
static SDValue replaceSplatVectorStore (SelectionDAG &DAG, StoreSDNode &St)
 Replace a splat of a scalar to a vector store by scalar stores of the scalar value.
 
static SDValue splitStores (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performSpliceCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performUnpackCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static bool isHalvingTruncateAndConcatOfLegalIntScalableType (SDNode *N)
 
static SDValue tryCombineExtendRShTrunc (SDNode *N, SelectionDAG &DAG)
 
static SDValue trySimplifySrlAddToRshrnb (SDValue Srl, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performUzpCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performGLD1Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performVectorShiftCombine (SDNode *N, const AArch64TargetLowering &TLI, TargetLowering::DAGCombinerInfo &DCI)
 Optimize a vector shift instruction and its operand if shifted out bits are not used.
 
static SDValue performSunpkloCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performPostLD1Combine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, bool IsLaneOp)
 Target-specific DAG combine function for post-increment LD1 (lane) and post-increment LD1R.
 
static bool performTBISimplification (SDValue Addr, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 Simplify Addr given that the top byte of it is ignored by HW during address translation.
 
static SDValue foldTruncStoreOfExt (SelectionDAG &DAG, SDNode *N)
 
static SDValue combineV3I8LoadExt (LoadSDNode *LD, SelectionDAG &DAG)
 
static SDValue performLOADCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static EVT tryGetOriginalBoolVectorType (SDValue Op, int Depth=0)
 
static SDValue vectorToScalarBitmask (SDNode *N, SelectionDAG &DAG)
 
static SDValue combineBoolVectorAndTruncateStore (SelectionDAG &DAG, StoreSDNode *Store)
 
bool isHalvingTruncateOfLegalScalableType (EVT SrcVT, EVT DstVT)
 
static SDValue combineI8TruncStore (StoreSDNode *ST, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performSTORECombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performMSTORECombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static bool foldIndexIntoBase (SDValue &BasePtr, SDValue &Index, SDValue Scale, SDLoc DL, SelectionDAG &DAG)
 
static bool findMoreOptimalIndexType (const MaskedGatherScatterSDNode *N, SDValue &BasePtr, SDValue &Index, SelectionDAG &DAG)
 
static SDValue performMaskedGatherScatterCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performNEONPostLDSTCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates.
 
static bool checkValueWidth (SDValue V, unsigned width, ISD::LoadExtType &ExtType)
 
static bool isEquivalentMaskless (unsigned CC, unsigned width, ISD::LoadExtType ExtType, int AddConstant, int CompConstant)
 
static SDValue performSubsToAndsCombine (SDNode *N, SDNode *SubsNode, SDNode *AndNode, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex, unsigned CC)
 
static SDValue performCONDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex)
 
static SDValue performBRCONDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue foldCSELofCTTZ (SDNode *N, SelectionDAG &DAG)
 
static SDValue foldCSELOfCSEL (SDNode *Op, SelectionDAG &DAG)
 
static SDValue performCSELCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryToWidenSetCCOperands (SDNode *Op, SelectionDAG &DAG)
 
static SDValue performVecReduceBitwiseCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performSETCCCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performFlagSettingCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned GenericOpcode)
 
static SDValue performSetCCPunpkCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSetccMergeZeroCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue getTestBitOperand (SDValue Op, unsigned &Bit, bool &Invert, SelectionDAG &DAG)
 
static SDValue performTBZCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue trySwapVSelectOperands (SDNode *N, SelectionDAG &DAG)
 
static SDValue performVSelectCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSelectCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with the compare-mask instructions rather than going via NZCV, even if LHS and RHS are really scalar.
 
static SDValue performDUPCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performNVCASTCombine (SDNode *N, SelectionDAG &DAG)
 Get rid of unnecessary NVCASTs (that don't change the type).
 
static SDValue performGlobalAddressCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget, const TargetMachine &TM)
 
static SDValue performCTLZCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue getScaledOffsetForBitWidth (SelectionDAG &DAG, SDValue Offset, SDLoc DL, unsigned BitWidth)
 
static bool isValidImmForSVEVecImmAddrMode (unsigned OffsetInBytes, unsigned ScalarSizeInBytes)
 Check if the value of OffsetInBytes can be used as an immediate for the gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode:
 
static bool isValidImmForSVEVecImmAddrMode (SDValue Offset, unsigned ScalarSizeInBytes)
 Check if the value of Offset represents a valid immediate for the SVE gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode:
 
static SDValue performScatterStoreCombine (SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets=true)
 
static SDValue performGatherLoadCombine (SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets=true)
 
static SDValue performSignExtendInRegCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue legalizeSVEGatherPrefetchOffsVec (SDNode *N, SelectionDAG &DAG)
 Legalize the gather prefetch (scalar + vector addressing mode) when the offset vector is an unpacked 32-bit scalable vector.
 
static SDValue combineSVEPrefetchVecBaseImmOff (SDNode *N, SelectionDAG &DAG, unsigned ScalarSizeInBytes)
 Combines a node carrying the intrinsic aarch64_sve_prf<T>_gather_scalar_offset into a node that uses aarch64_sve_prfb_gather_uxtw_index when the scalar offset passed to aarch64_sve_prf<T>_gather_scalar_offset is not a valid immediate for the sve gather prefetch instruction with vector plus immediate addressing mode.
 
static bool isLanes1toNKnownZero (SDValue Op)
 
static SDValue removeRedundantInsertVectorElt (SDNode *N)
 
static SDValue performInsertVectorEltCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performFPExtendCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performBSPExpandForSVE (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performDupLane128Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue tryCombineMULLWithUZP1 (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performMULLCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performScalarToVectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static void replaceBoolVectorBitcast (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
 
static void CustomNonLegalBITCASTResults (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, EVT ExtendVT, EVT CastVT)
 
static void ReplaceAddWithADDP (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static void ReplaceReductionResults (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, unsigned InterOp, unsigned AcrossOp)
 
static SDValue createGPRPairNode (SelectionDAG &DAG, SDValue V)
 
static void ReplaceCMP_SWAP_128Results (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static unsigned getAtomicLoad128Opcode (unsigned ISDOpcode, AtomicOrdering Ordering)
 
static void ReplaceATOMIC_LOAD_128Results (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static Value * UseTlsOffset (IRBuilderBase &IRB, unsigned Offset)
 
static SDValue getPredicateForFixedLengthVector (SelectionDAG &DAG, SDLoc &DL, EVT VT)
 
static SDValue GenerateFixedLengthSVETBL (SDValue Op, SDValue Op1, SDValue Op2, ArrayRef< int > ShuffleMask, EVT VT, EVT ContainerVT, SelectionDAG &DAG)
 
static SDValue emitConditionalComparison (SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue CCOp, AArch64CC::CondCode Predicate, AArch64CC::CondCode OutCC, const SDLoc &DL, SelectionDAG &DAG)
 can be transformed to: not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" which can be implemented as: cmp C ccmp D, inv(CD), CC ccmp A, CA, inv(CD) ccmp B, CB, inv(CA) check for CB flags
 
static bool canEmitConjunction (const SDValue Val, bool &CanNegate, bool &MustBeFirst, bool WillNegate, unsigned Depth=0)
 Returns true if Val is a tree of AND/OR/SETCC operations that can be expressed as a conjunction.
 
static SDValue emitConjunctionRec (SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate)
 Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops.
 
static SDValue emitConjunction (SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC)
 Emit expression as a conjunction (a series of CCMP/CFCMP ops).
 

Variables

cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration ("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))
 
static cl::opt< bool > EnableOptimizeLogicalImm ("aarch64-enable-logical-imm", cl::Hidden, cl::desc("Enable AArch64 logical imm instruction " "optimization"), cl::init(true))
 
static cl::opt< bool > EnableCombineMGatherIntrinsics ("aarch64-enable-mgather-combine", cl::Hidden, cl::desc("Combine extends of AArch64 masked " "gather intrinsics"), cl::init(true))
 
static cl::opt< bool > EnableExtToTBL ("aarch64-enable-ext-to-tbl", cl::Hidden, cl::desc("Combine ext and trunc to TBL"), cl::init(true))
 
static cl::opt< unsigned > MaxXors ("aarch64-max-xors", cl::init(16), cl::Hidden, cl::desc("Maximum of xors"))
 
cl::opt< bool > EnableSVEGISel ("aarch64-enable-gisel-sve", cl::Hidden, cl::desc("Enable / disable SVE scalable vectors in Global ISel"), cl::init(false))
 
static const MVT MVT_CC = MVT::i32
 Value type used for condition codes.
 
static const MCPhysReg GPRArgRegs []
 
static const MCPhysReg FPRArgRegs []
 

Macro Definition Documentation

◆ DEBUG_TYPE

#define DEBUG_TYPE   "aarch64-lower"

Definition at line 109 of file AArch64ISelLowering.cpp.

◆ GET_REGISTER_MATCHER

#define GET_REGISTER_MATCHER

Definition at line 11327 of file AArch64ISelLowering.cpp.

◆ LCALLNAME4

#define LCALLNAME4 (   A,
  B 
)
Value:
LCALLNAMES(A, B, 1) \
LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)

◆ LCALLNAME5

#define LCALLNAME5 (   A,
  B 
)
Value:
LCALLNAMES(A, B, 1) \
LCALLNAMES(A, B, 2) \
LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)

◆ LCALLNAMES

#define LCALLNAMES (   A,
  B,
  N 
)
Value:
setLibcallName(A##N##_RELAX, #B #N "_relax"); \
setLibcallName(A##N##_ACQ, #B #N "_acq"); \
setLibcallName(A##N##_REL, #B #N "_rel"); \
setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
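
For illustration only (the arguments below are hypothetical, not taken from this file), an invocation such as LCALLNAMES(RTLIB::FOO_, __foo_helper, 4) token-pastes N onto A and stringizes B and N, so the body above expands to:

setLibcallName(RTLIB::FOO_4_RELAX, "__foo_helper" "4" "_relax");
setLibcallName(RTLIB::FOO_4_ACQ, "__foo_helper" "4" "_acq");
setLibcallName(RTLIB::FOO_4_REL, "__foo_helper" "4" "_rel");
setLibcallName(RTLIB::FOO_4_ACQ_REL, "__foo_helper" "4" "_acq_rel");

LCALLNAME4 and LCALLNAME5 simply repeat this expansion for the size suffixes 1, 2, 4, 8 (and additionally 16).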

◆ MAKE_CASE

#define MAKE_CASE (   V)
Value:
case V: \
return #V;
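
A minimal sketch of how a stringifying case macro of this shape is typically used, assuming the MAKE_CASE definition above is in scope (the enum and function below are illustrative, not from this file). Each MAKE_CASE(X) expands to a case label that returns the stringized enumerator name:

enum class Opcode { Add, Sub };

static const char *opcodeName(Opcode Op) {
  switch (Op) {
    // MAKE_CASE(Opcode::Add) expands to: case Opcode::Add: return "Opcode::Add";
    MAKE_CASE(Opcode::Add)
    MAKE_CASE(Opcode::Sub)
  }
  return "unknown";
}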

Enumeration Type Documentation

◆ PredicateConstraint

enum class PredicateConstraint
strong
Enumerator
Uph 
Upl 
Upa 

Definition at line 11618 of file AArch64ISelLowering.cpp.
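
These enumerators appear to correspond to the AArch64 inline-assembly operand constraints "Uph", "Upl" and "Upa" for SVE predicate registers. A hedged sketch of how such a constraint is written in user code, assuming the usual meaning of "Upa" (any SVE predicate register) and an SVE-enabled target:

#include <arm_sve.h>

svbool_t allTrueBytes() {
  svbool_t P;
  // Ask the compiler to allocate any SVE predicate register for the output.
  asm("ptrue %0.b" : "=Upa"(P));
  return P;
}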

◆ ReducedGprConstraint

enum class ReducedGprConstraint
strong
Enumerator
Uci 
Ucj 

Definition at line 11650 of file AArch64ISelLowering.cpp.

Function Documentation

◆ addRequiredExtensionForVectorMULL()

static SDValue addRequiredExtensionForVectorMULL ( SDValue  N,
SelectionDAG DAG,
const EVT OrigTy,
const EVT ExtTy,
unsigned  ExtOpcode 
)
static

◆ analyzeCallOperands()

static void analyzeCallOperands ( const AArch64TargetLowering TLI,
const AArch64Subtarget Subtarget,
const TargetLowering::CallLoweringInfo CLI,
CCState CCInfo 
)
static

◆ areExtractExts()

static bool areExtractExts ( Value Ext1,
Value Ext2 
)
static

Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.

Definition at line 16206 of file AArch64ISelLowering.cpp.

References llvm::PatternMatch::m_Value(), llvm::PatternMatch::m_ZExtOrSExt(), and llvm::PatternMatch::match().

Referenced by llvm::AArch64TargetLowering::shouldSinkOperands(), and llvm::ARMTargetLowering::shouldSinkOperands().
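
An illustrative source pattern (not from this file) with the shape this check and shouldSinkOperands() look for: both multiply operands are extends that double the element width, which is what allows a widening multiply (e.g. smull/umull) to be formed when the loop is vectorized.

#include <cstdint>

void widenedMul(int32_t *Dst, const int16_t *A, const int16_t *B, int N) {
  for (int I = 0; I != N; ++I)
    Dst[I] = (int32_t)A[I] * (int32_t)B[I]; // sext(A[I]) * sext(B[I])
}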

◆ areExtractShuffleVectors()

static bool areExtractShuffleVectors ( Value Op1,
Value Op2,
bool  AllowSplat = false 
)
static

◆ areLoadedOffsetButOtherwiseSame()

static bool areLoadedOffsetButOtherwiseSame ( SDValue  Op0,
SDValue  Op1,
SelectionDAG DAG,
unsigned NumSubLoads 
)
static

◆ areOperandsOfVmullHighP64()

static bool areOperandsOfVmullHighP64 ( Value Op1,
Value Op2 
)
static

Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.

Definition at line 16233 of file AArch64ISelLowering.cpp.

References isOperandOfVmullHighP64().

Referenced by llvm::AArch64TargetLowering::shouldSinkOperands().

◆ calculatePreExtendType()

static EVT calculatePreExtendType ( SDValue  Extend)
static

Calculates what the pre-extend type is, based on the extension operation node provided by Extend.

In the case that Extend is a SIGN_EXTEND or a ZERO_EXTEND, the pre-extend type is pulled directly from the operand, while other extend operations need a bit more inspection to get this information.

Parameters
Extend  The SDNode from the DAG that represents the extend operation
Returns
The type representing the Extend source type, or MVT::Other if no valid type can be determined

Definition at line 18500 of file AArch64ISelLowering.cpp.

References llvm::ISD::AND, llvm::ISD::AssertSext, llvm::ISD::AssertZext, llvm::SDValue::getNode(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::SDValue::getValueType(), llvm::VTSDNode::getVT(), llvm::ISD::SIGN_EXTEND, llvm::ISD::SIGN_EXTEND_INREG, and llvm::ISD::ZERO_EXTEND.

Referenced by performBuildShuffleExtendCombine().

◆ callConvSupportsVarArgs()

static bool callConvSupportsVarArgs ( CallingConv::ID  CC)
static

Return true if the calling convention supports varargs. Currently only those that pass varargs like the C calling convention does are eligible. Calling conventions listed in this function must also be properly handled in AArch64Subtarget::isCallingConvWin64.

Definition at line 8042 of file AArch64ISelLowering.cpp.

References llvm::CallingConv::C, CC, and llvm::CallingConv::PreserveNone.

◆ canEmitConjunction()

static bool canEmitConjunction ( const SDValue  Val,
bool CanNegate,
bool MustBeFirst,
bool  WillNegate,
unsigned  Depth = 0 
)
static

Returns true if Val is a tree of AND/OR/SETCC operations that can be expressed as a conjunction.

See CMP;CCMP matching.

Parameters
CanNegate  Set to true if we can negate the whole sub-tree just by changing the conditions on the SETCC tests. (This means we can call emitConjunctionRec() with Negate==true on this sub-tree.)
MustBeFirst  Set to true if this subtree needs to be negated and we cannot do the negation naturally. We are required to emit the subtree first in this case.
WillNegate  Is true if we are called when the result of this subexpression must be negated. This happens when the outer expression is an OR. We can use this fact to know that we have a double negation (or (or ...) ...) that can be implemented for free.

Definition at line 3639 of file AArch64ISelLowering.cpp.

References llvm::ISD::AND, assert(), canEmitConjunction(), llvm::Depth, llvm::SDNode::getOpcode(), llvm::SDNode::getOperand(), llvm::SDValue::getValueType(), llvm::SDValue::hasOneUse(), llvm::ISD::OR, and llvm::ISD::SETCC.

Referenced by canEmitConjunction(), emitConjunction(), and emitConjunctionRec().
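
An illustrative example (not from this file): the AND of two SETCC nodes produced by a range check is a conjunction in the sense above, so it can usually be emitted as one CMP followed by a CCMP and a CSET instead of materializing each i1 value separately.

bool inRange(int X, int Lo, int Hi) {
  // (X >= Lo) && (X <= Hi): an AND of two comparisons, i.e. a tree that
  // canEmitConjunction() can recognize for CMP;CCMP matching.
  return X >= Lo && X <= Hi;
}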

◆ canGuaranteeTCO()

static bool canGuaranteeTCO ( CallingConv::ID  CC,
bool  GuaranteeTailCalls 
)
static

◆ canLowerSRLToRoundingShiftForVT()

static bool canLowerSRLToRoundingShiftForVT ( SDValue  Shift,
EVT  ResVT,
SelectionDAG DAG,
unsigned ShiftValue,
SDValue RShOperand 
)
static

◆ cannotBeIntMin()

static bool cannotBeIntMin ( SDValue  CheckedVal,
SelectionDAG DAG 
)
static

◆ carryFlagToValue()

static SDValue carryFlagToValue ( SDValue  Glue,
EVT  VT,
SelectionDAG DAG,
bool  Invert 
)
static

◆ changeFPCCToAArch64CC()

static void changeFPCCToAArch64CC ( ISD::CondCode  CC,
AArch64CC::CondCode CondCode,
AArch64CC::CondCode CondCode2 
)
static

◆ changeFPCCToANDAArch64CC()

static void changeFPCCToANDAArch64CC ( ISD::CondCode  CC,
AArch64CC::CondCode CondCode,
AArch64CC::CondCode CondCode2 
)
static

Convert a DAG fp condition code to an AArch64 CC.

This differs from changeFPCCToAArch64CC in that it returns cond codes that should be AND'ed instead of OR'ed.

Definition at line 3365 of file AArch64ISelLowering.cpp.

References llvm::AArch64CC::AL, assert(), CC, changeFPCCToAArch64CC(), llvm::AArch64CC::LE, llvm::AArch64CC::NE, llvm::AArch64CC::PL, llvm::ISD::SETONE, llvm::ISD::SETUEQ, and llvm::AArch64CC::VC.

Referenced by emitConjunctionRec().

◆ changeIntCCToAArch64CC()

static AArch64CC::CondCode changeIntCCToAArch64CC ( ISD::CondCode  CC)
static

◆ changeVectorFPCCToAArch64CC()

static void changeVectorFPCCToAArch64CC ( ISD::CondCode  CC,
AArch64CC::CondCode CondCode,
AArch64CC::CondCode CondCode2,
bool Invert 
)
static

changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector instructions.

Fewer operations are available without a real NZCV register, so we have to use less efficient combinations to get the same effect.

Definition at line 3395 of file AArch64ISelLowering.cpp.

References CC, changeFPCCToAArch64CC(), llvm::AArch64CC::GE, llvm::AArch64CC::MI, llvm::ISD::SETO, llvm::ISD::SETUEQ, llvm::ISD::SETUGE, llvm::ISD::SETUGT, llvm::ISD::SETULE, llvm::ISD::SETULT, and llvm::ISD::SETUO.

◆ checkValueWidth()

static bool checkValueWidth ( SDValue  V,
unsigned  width,
ISD::LoadExtType ExtType 
)
static

◆ checkZExtBool()

static bool checkZExtBool ( SDValue  Arg,
const SelectionDAG DAG 
)
static

◆ combineAcrossLanesIntrinsic()

static SDValue combineAcrossLanesIntrinsic ( unsigned  Opc,
SDNode N,
SelectionDAG DAG 
)
static

◆ combineBoolVectorAndTruncateStore()

static SDValue combineBoolVectorAndTruncateStore ( SelectionDAG DAG,
StoreSDNode Store 
)
static

◆ combineI8TruncStore()

static SDValue combineI8TruncStore ( StoreSDNode ST,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ combineSVEPrefetchVecBaseImmOff()

static SDValue combineSVEPrefetchVecBaseImmOff ( SDNode N,
SelectionDAG DAG,
unsigned  ScalarSizeInBytes 
)
static

Combines a node carrying the intrinsic aarch64_sve_prf<T>_gather_scalar_offset into a node that uses aarch64_sve_prfb_gather_uxtw_index when the scalar offset passed to aarch64_sve_prf<T>_gather_scalar_offset is not a valid immediate for the sve gather prefetch instruction with vector plus immediate addressing mode.

Definition at line 25364 of file AArch64ISelLowering.cpp.

References DL, llvm::SelectionDAG::getConstant(), llvm::SelectionDAG::getNode(), llvm::SelectionDAG::getVTList(), isValidImmForSVEVecImmAddrMode(), N, and std::swap().

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ combineSVEReductionFP()

static SDValue combineSVEReductionFP ( SDNode N,
unsigned  Opc,
SelectionDAG DAG 
)
static

◆ combineSVEReductionInt()

static SDValue combineSVEReductionInt ( SDNode N,
unsigned  Opc,
SelectionDAG DAG 
)
static

◆ combineSVEReductionOrderedFP()

static SDValue combineSVEReductionOrderedFP ( SDNode N,
unsigned  Opc,
SelectionDAG DAG 
)
static

◆ combineV3I8LoadExt()

static SDValue combineV3I8LoadExt ( LoadSDNode LD,
SelectionDAG DAG 
)
static

◆ ConstantBuildVector()

static SDValue ConstantBuildVector ( SDValue  Op,
SelectionDAG DAG,
const AArch64Subtarget ST 
)
static

◆ constructDup()

static SDValue constructDup ( SDValue  V,
int  Lane,
SDLoc  dl,
EVT  VT,
unsigned  Opcode,
SelectionDAG DAG 
)
static

◆ convertFixedMaskToScalableVector()

static SDValue convertFixedMaskToScalableVector ( SDValue  Mask,
SelectionDAG DAG 
)
static

◆ convertFromScalableVector()

static SDValue convertFromScalableVector ( SelectionDAG DAG,
EVT  VT,
SDValue  V 
)
static

◆ convertMergedOpToPredOp()

static SDValue convertMergedOpToPredOp ( SDNode N,
unsigned  Opc,
SelectionDAG DAG,
bool  UnpredOp = false,
bool  SwapOperands = false 
)
static

◆ convertToScalableVector()

static SDValue convertToScalableVector ( SelectionDAG DAG,
EVT  VT,
SDValue  V 
)
static

◆ createGPRPairNode()

static SDValue createGPRPairNode ( SelectionDAG DAG,
SDValue  V 
)
static

◆ createTblForTrunc()

static void createTblForTrunc ( TruncInst TI,
bool  IsLittleEndian 
)
static

◆ createTblShuffleForSExt()

static Value * createTblShuffleForSExt ( IRBuilderBase Builder,
Value Op,
FixedVectorType DstTy,
bool  IsLittleEndian 
)
static

◆ createTblShuffleForZExt()

static Value * createTblShuffleForZExt ( IRBuilderBase Builder,
Value Op,
FixedVectorType ZExtTy,
FixedVectorType DstTy,
bool  IsLittleEndian 
)
static

◆ createTblShuffleMask()

static bool createTblShuffleMask ( unsigned  SrcWidth,
unsigned  DstWidth,
unsigned  NumElts,
bool  IsLittleEndian,
SmallVectorImpl< int > &  Mask 
)
static

Definition at line 16554 of file AArch64ISelLowering.cpp.

References assert(), and I.

Referenced by createTblShuffleForSExt(), and createTblShuffleForZExt().

◆ CustomNonLegalBITCASTResults()

static void CustomNonLegalBITCASTResults ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
EVT  ExtendVT,
EVT  CastVT 
)
static

◆ emitComparison()

static SDValue emitComparison ( SDValue  LHS,
SDValue  RHS,
ISD::CondCode  CC,
const SDLoc dl,
SelectionDAG DAG 
)
static

◆ emitConditionalComparison()

static SDValue emitConditionalComparison ( SDValue  LHS,
SDValue  RHS,
ISD::CondCode  CC,
SDValue  CCOp,
AArch64CC::CondCode  Predicate,
AArch64CC::CondCode  OutCC,
const SDLoc DL,
SelectionDAG DAG 
)
static

can be transformed to: not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" which can be implemented as: cmp C ccmp D, inv(CD), CC ccmp A, CA, inv(CD) ccmp B, CB, inv(CA) check for CB flags

A counterexample is "or (and A B) (and C D)" which translates to not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we can only implement 1 of the inner (not) operations, but not both! Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.

Definition at line 3583 of file AArch64ISelLowering.cpp.

References assert(), CC, llvm::AArch64ISD::CCMN, llvm::AArch64ISD::CCMP, DL, llvm::AArch64ISD::FCCMP, llvm::ISD::FP_EXTEND, llvm::SelectionDAG::getConstant(), llvm::AArch64CC::getInvertedCondCode(), llvm::SelectionDAG::getNode(), llvm::AArch64CC::getNZCVToSatisfyCondCode(), llvm::SelectionDAG::getSubtarget(), isCMN(), llvm::isNullConstant(), LHS, MVT_CC, RHS, and llvm::ISD::SUB.

Referenced by emitConjunctionRec().

◆ emitConjunction()

static SDValue emitConjunction ( SelectionDAG DAG,
SDValue  Val,
AArch64CC::CondCode OutCC 
)
static

Emit expression as a conjunction (a series of CCMP/CFCMP ops).

In some cases this is even possible with OR operations in the expression. See CMP;CCMP matching.

See also
emitConjunctionRec().

Definition at line 3813 of file AArch64ISelLowering.cpp.

References llvm::AArch64CC::AL, canEmitConjunction(), and emitConjunctionRec().

Referenced by getAArch64Cmp(), LowerBRCOND(), and performANDSETCCCombine().

◆ emitConjunctionRec()

static SDValue emitConjunctionRec ( SelectionDAG DAG,
SDValue  Val,
AArch64CC::CondCode OutCC,
bool  Negate,
SDValue  CCOp,
AArch64CC::CondCode  Predicate 
)
static

Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops.

See CMP;CCMP matching. Tries to transform the given i1 producing node Val to a series of compare and conditional compare operations.

Returns
an NZCV flags producing node and sets OutCC to the flags that should be tested, or returns SDValue() if the transformation was not possible. Negate is true if we want this sub-tree to be negated just by changing SETCC conditions.

Definition at line 3701 of file AArch64ISelLowering.cpp.

References llvm::AArch64CC::AL, llvm::ISD::AND, assert(), canEmitConjunction(), CC, changeFPCCToANDAArch64CC(), changeIntCCToAArch64CC(), DL, emitComparison(), emitConditionalComparison(), emitConjunctionRec(), llvm::AArch64CC::getInvertedCondCode(), llvm::SDValue::getNode(), llvm::SDNode::getOpcode(), llvm::SDNode::getOperand(), llvm::SDNode::hasOneUse(), LHS, llvm::ISD::OR, RHS, llvm::ISD::SETCC, and std::swap().

Referenced by emitConjunction(), and emitConjunctionRec().

◆ emitStrictFPComparison()

static SDValue emitStrictFPComparison ( SDValue  LHS,
SDValue  RHS,
const SDLoc dl,
SelectionDAG DAG,
SDValue  Chain,
bool  IsSignaling 
)
static

◆ EmitVectorComparison()

static SDValue EmitVectorComparison ( SDValue  LHS,
SDValue  RHS,
AArch64CC::CondCode  CC,
bool  NoNans,
EVT  VT,
const SDLoc dl,
SelectionDAG DAG 
)
static

◆ extractPtrauthBlendDiscriminators()

static std::tuple< SDValue, SDValue > extractPtrauthBlendDiscriminators ( SDValue  Disc,
SelectionDAG DAG 
)
static

◆ findMoreOptimalIndexType()

static bool findMoreOptimalIndexType ( const MaskedGatherScatterSDNode N,
SDValue BasePtr,
SDValue Index,
SelectionDAG DAG 
)
static

◆ foldADCToCINC()

static SDValue foldADCToCINC ( SDNode N,
SelectionDAG DAG 
)
static

◆ foldCSELOfCSEL()

static SDValue foldCSELOfCSEL ( SDNode Op,
SelectionDAG DAG 
)
static

◆ foldCSELofCTTZ()

static SDValue foldCSELofCTTZ ( SDNode N,
SelectionDAG DAG 
)
static

◆ foldIndexIntoBase()

static bool foldIndexIntoBase ( SDValue BasePtr,
SDValue Index,
SDValue  Scale,
SDLoc  DL,
SelectionDAG DAG 
)
static

◆ foldOverflowCheck()

static SDValue foldOverflowCheck ( SDNode Op,
SelectionDAG DAG,
bool  IsAdd 
)
static

◆ foldTruncStoreOfExt()

static SDValue foldTruncStoreOfExt ( SelectionDAG DAG,
SDNode N 
)
static

◆ foldVectorXorShiftIntoCmp()

static SDValue foldVectorXorShiftIntoCmp ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ GenerateFixedLengthSVETBL()

static SDValue GenerateFixedLengthSVETBL ( SDValue  Op,
SDValue  Op1,
SDValue  Op2,
ArrayRef< int >  ShuffleMask,
EVT  VT,
EVT  ContainerVT,
SelectionDAG DAG 
)
static

◆ GeneratePerfectShuffle()

static SDValue GeneratePerfectShuffle ( unsigned  ID,
SDValue  V1,
SDValue  V2,
unsigned  PFEntry,
SDValue  LHS,
SDValue  RHS,
SelectionDAG DAG,
const SDLoc dl 
)
static

◆ GenerateTBL()

static SDValue GenerateTBL ( SDValue  Op,
ArrayRef< int >  ShuffleMask,
SelectionDAG DAG 
)
static

◆ getAArch64Cmp()

static SDValue getAArch64Cmp ( SDValue  LHS,
SDValue  RHS,
ISD::CondCode  CC,
SDValue AArch64cc,
SelectionDAG DAG,
const SDLoc dl 
)
static

◆ getAArch64XALUOOp()

static std::pair< SDValue, SDValue > getAArch64XALUOOp ( AArch64CC::CondCode CC,
SDValue  Op,
SelectionDAG DAG 
)
static

◆ getAtomicLoad128Opcode()

static unsigned getAtomicLoad128Opcode ( unsigned  ISDOpcode,
AtomicOrdering  Ordering 
)
static

◆ getCmpOperandFoldingProfit()

static unsigned getCmpOperandFoldingProfit ( SDValue  Op)
static

Returns how profitable it is to fold a comparison's operand's shift and/or extension operations.

Definition at line 3827 of file AArch64ISelLowering.cpp.

References llvm::ISD::AND, llvm::ISD::SHL, llvm::ISD::SIGN_EXTEND_INREG, llvm::ISD::SRA, and llvm::ISD::SRL.

Referenced by getAArch64Cmp().
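
AArch64 integer compares can fold a shift or an extension into their second operand (for example "cmp x0, x1, lsl #3"), so shifting before comparing need not cost an extra instruction; getAArch64Cmp() uses this estimate when deciding how to arrange the comparison. An illustrative source pattern (not from this file):

bool cmpAgainstShifted(long A, long B) {
  return A == (B << 3); // the shift can be folded into the compare operand
}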

◆ getConstantLaneNumOfExtractHalfOperand()

static std::optional< uint64_t > getConstantLaneNumOfExtractHalfOperand ( SDValue Op)
static

◆ getContainerForFixedLengthVector()

static EVT getContainerForFixedLengthVector ( SelectionDAG DAG,
EVT  VT 
)
static

◆ getCSETCondCode()

static std::optional< AArch64CC::CondCode > getCSETCondCode ( SDValue  Op)
static

◆ getDeinterleave2Values()

bool getDeinterleave2Values ( Value DI,
SmallVectorImpl< Instruction * > &  DeinterleavedValues,
SmallVectorImpl< Instruction * > &  DeInterleaveDeadInsts 
)

◆ getDeinterleave4Values()

bool getDeinterleave4Values ( Value DI,
SmallVectorImpl< Instruction * > &  DeinterleavedValues,
SmallVectorImpl< Instruction * > &  DeInterleaveDeadInsts 
)

◆ getDeinterleavedValues()

bool getDeinterleavedValues ( Value DI,
SmallVectorImpl< Instruction * > &  DeinterleavedValues,
SmallVectorImpl< Instruction * > &  DeInterleaveDeadInsts 
)

◆ getDUPLANEOp()

static unsigned getDUPLANEOp ( EVT  EltType)
static

◆ getEstimate()

static SDValue getEstimate ( const AArch64Subtarget ST,
unsigned  Opcode,
SDValue  Operand,
SelectionDAG DAG,
int &  ExtraSteps 
)
static

◆ getExtensionTo64Bits()

static EVT getExtensionTo64Bits ( const EVT OrigVT)
static

◆ getExtFactor()

static unsigned getExtFactor ( SDValue V)
static

getExtFactor - Determine the adjustment factor for the position when generating an "extract from vector registers" instruction.

Definition at line 12093 of file AArch64ISelLowering.cpp.

References llvm::EVT::getSizeInBits().

Referenced by GeneratePerfectShuffle(), and llvm::AArch64TargetLowering::ReconstructShuffle().

◆ getGatherVecOpcode()

unsigned getGatherVecOpcode ( bool  IsScaled,
bool  IsSigned,
bool  NeedsExtend 
)

◆ getIntrinsicID()

static unsigned getIntrinsicID ( const SDNode N)
static

◆ getNegatedInteger()

static SDValue getNegatedInteger ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ getPackedSVEVectorVT() [1/2]

static EVT getPackedSVEVectorVT ( ElementCount  EC)
inlinestatic

Definition at line 197 of file AArch64ISelLowering.cpp.

References llvm_unreachable.

◆ getPackedSVEVectorVT() [2/2]

static EVT getPackedSVEVectorVT ( EVT  VT)
inlinestatic

◆ getPredicateForFixedLengthVector()

static SDValue getPredicateForFixedLengthVector ( SelectionDAG DAG,
SDLoc DL,
EVT  VT 
)
static

◆ getPredicateForScalableVector()

static SDValue getPredicateForScalableVector ( SelectionDAG DAG,
SDLoc DL,
EVT  VT 
)
static

◆ getPredicateForVector()

static SDValue getPredicateForVector ( SelectionDAG DAG,
SDLoc DL,
EVT  VT 
)
static

◆ getPredicateRegisterClass()

static const TargetRegisterClass * getPredicateRegisterClass ( PredicateConstraint  Constraint,
EVT  VT 
)
static

◆ getPromotedVTForPredicate()

static EVT getPromotedVTForPredicate ( EVT  VT)
inlinestatic

◆ getPTest()

static SDValue getPTest ( SelectionDAG DAG,
EVT  VT,
SDValue  Pg,
SDValue  Op,
AArch64CC::CondCode  Cond 
)
static

◆ getPTrue()

static SDValue getPTrue ( SelectionDAG DAG,
SDLoc  DL,
EVT  VT,
int  Pattern 
)
inlinestatic

◆ getReducedGprRegisterClass()

static const TargetRegisterClass * getReducedGprRegisterClass ( ReducedGprConstraint  Constraint,
EVT  VT 
)
static

◆ getReductionSDNode()

static SDValue getReductionSDNode ( unsigned  Op,
SDLoc  DL,
SDValue  ScalarOp,
SelectionDAG DAG 
)
static

◆ getScaledOffsetForBitWidth()

static SDValue getScaledOffsetForBitWidth ( SelectionDAG DAG,
SDValue  Offset,
SDLoc  DL,
unsigned  BitWidth 
)
static

◆ getSETCC()

static SDValue getSETCC ( AArch64CC::CondCode  CC,
SDValue  NZCV,
const SDLoc DL,
SelectionDAG DAG 
)
static

◆ getSignExtendedGatherOpcode()

unsigned getSignExtendedGatherOpcode ( unsigned  Opcode)

◆ getSMCondition()

static unsigned getSMCondition ( const SMEAttrs CallerAttrs,
const SMEAttrs CalleeAttrs 
)
static

◆ getStructuredLoadFunction()

static Function * getStructuredLoadFunction ( Module M,
unsigned  Factor,
bool  Scalable,
Type LDVTy,
Type PtrTy 
)
static

◆ getStructuredStoreFunction()

static Function * getStructuredStoreFunction ( Module M,
unsigned  Factor,
bool  Scalable,
Type STVTy,
Type PtrTy 
)
static

◆ getSVEContainerIRType()

static ScalableVectorType * getSVEContainerIRType ( FixedVectorType VTy)
static

◆ getSVEContainerType()

static MVT getSVEContainerType ( EVT  ContentTy)
static

◆ getSVEPredicateBitCast()

static SDValue getSVEPredicateBitCast ( EVT  VT,
SDValue  Op,
SelectionDAG DAG 
)
static

◆ getTestBitOperand()

static SDValue getTestBitOperand ( SDValue  Op,
unsigned Bit,
bool Invert,
SelectionDAG DAG 
)
static

◆ getValuesToInterleave()

bool getValuesToInterleave ( Value II,
SmallVectorImpl< Value * > &  InterleavedValues,
SmallVectorImpl< Instruction * > &  InterleaveDeadInsts 
)

◆ getVectorBitwiseReduce()

static SDValue getVectorBitwiseReduce ( unsigned  Opcode,
SDValue  Vec,
EVT  VT,
SDLoc  DL,
SelectionDAG DAG 
)
static

◆ getVShiftImm()

static bool getVShiftImm ( SDValue  Op,
unsigned  ElementBits,
int64_t &  Cnt 
)
static

getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift operation, where all the elements of the build_vector must have the same constant integer value.
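
A minimal usage sketch (DAG and DL are assumed from the surrounding lowering code; this is illustrative, not the implementation):

// BUILD_VECTOR <7, 7, 7, 7> of type v4i16: all elements are the same constant,
// so getVShiftImm succeeds and reports the splatted value.
SDValue Splat = DAG.getConstant(7, DL, MVT::v4i16);
int64_t Cnt;
bool Valid = getVShiftImm(Splat, /*ElementBits=*/16, Cnt); // true, Cnt == 7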

Definition at line 15049 of file AArch64ISelLowering.cpp.

References llvm::ISD::BITCAST, llvm::APInt::getSExtValue(), and llvm::BuildVectorSDNode::isConstantSplat().

Referenced by isVShiftLImm(), and isVShiftRImm().

◆ hasNearbyPairedStore()

template<typename Iter >
bool hasNearbyPairedStore ( Iter  It,
Iter  End,
Value Ptr,
const DataLayout DL 
)

◆ hasPairwiseAdd()

static bool hasPairwiseAdd ( unsigned  Opcode,
EVT  VT,
bool  FullFP16 
)
static

◆ isAddSubSExt()

static bool isAddSubSExt ( SDValue  N,
SelectionDAG DAG 
)
static

◆ isAddSubZExt()

static bool isAddSubZExt ( SDValue  N,
SelectionDAG DAG 
)
static

◆ isAllActivePredicate()

static bool isAllActivePredicate ( SelectionDAG DAG,
SDValue  N 
)
static

◆ isAllConstantBuildVector()

static bool isAllConstantBuildVector ( const SDValue PotentialBVec,
uint64_t ConstVal 
)
static

◆ isAllInactivePredicate()

static bool isAllInactivePredicate ( SDValue  N)
static

◆ isCheapToExtend()

static bool isCheapToExtend ( const SDValue N)
static

◆ isCMN()

static bool isCMN ( SDValue  Op,
ISD::CondCode  CC,
SelectionDAG DAG 
)
static

◆ isCMP()

static bool isCMP ( SDValue  Op)
static

Definition at line 20563 of file AArch64ISelLowering.cpp.

References llvm::AArch64ISD::SUBS.

Referenced by foldCSELOfCSEL(), and foldOverflowCheck().

◆ isConcatMask()

static bool isConcatMask ( ArrayRef< int >  Mask,
EVT  VT,
bool  SplitLHS 
)
static

◆ isConstantSplatVectorMaskForType()

static bool isConstantSplatVectorMaskForType ( SDNode N,
EVT  MemVT 
)
static

◆ isEquivalentMaskless()

static bool isEquivalentMaskless ( unsigned  CC,
unsigned  width,
ISD::LoadExtType  ExtType,
int  AddConstant,
int  CompConstant 
)
static

◆ isEssentiallyExtractHighSubvector()

static bool isEssentiallyExtractHighSubvector ( SDValue  N)
static

◆ isExtendedBUILD_VECTOR()

static bool isExtendedBUILD_VECTOR ( SDValue  N,
SelectionDAG DAG,
bool  isSigned 
)
static

◆ isExtendOrShiftOperand()

static bool isExtendOrShiftOperand ( SDValue  N)
static

◆ isEXTMask()

static bool isEXTMask ( ArrayRef< int >  M,
EVT  VT,
bool ReverseEXT,
unsigned Imm 
)
static

◆ isHalvingTruncateAndConcatOfLegalIntScalableType()

static bool isHalvingTruncateAndConcatOfLegalIntScalableType ( SDNode N)
static

Definition at line 22552 of file AArch64ISelLowering.cpp.

References llvm::SDNode::getValueType(), N, and llvm::AArch64ISD::UZP1.

Referenced by performUzpCombine().

◆ isHalvingTruncateOfLegalScalableType()

bool isHalvingTruncateOfLegalScalableType ( EVT  SrcVT,
EVT  DstVT 
)

Definition at line 23345 of file AArch64ISelLowering.cpp.

Referenced by performMSTORECombine(), and performSTORECombine().

◆ isINSMask()

static bool isINSMask ( ArrayRef< int >  M,
int  NumInputElements,
bool DstIsLeft,
int &  Anomaly 
)
static

◆ isIntImmediate()

static bool isIntImmediate ( const SDNode N,
uint64_t Imm 
)
static

Definition at line 2172 of file AArch64ISelLowering.cpp.

References llvm::CallingConv::C, and N.

◆ isLanes1toNKnownZero()

static bool isLanes1toNKnownZero ( SDValue  Op)
static

◆ isLegalArithImmed()

static bool isLegalArithImmed ( uint64_t  C)
static

Definition at line 3426 of file AArch64ISelLowering.cpp.

References llvm::CallingConv::C, llvm::dbgs(), and LLVM_DEBUG.

Referenced by getAArch64Cmp().

◆ isLoadOrMultipleLoads()

static bool isLoadOrMultipleLoads ( SDValue  B,
SmallVector< LoadSDNode * > &  Loads 
)
static

◆ isMergePassthruOpcode()

static bool isMergePassthruOpcode ( unsigned  Opc)
static

◆ isNegatedInteger()

static bool isNegatedInteger ( SDValue  Op)
static

Definition at line 20467 of file AArch64ISelLowering.cpp.

References llvm::isNullConstant(), and llvm::ISD::SUB.

Referenced by performNegCSelCombine().

◆ isOpcWithIntImmediate()

static bool isOpcWithIntImmediate ( const SDNode N,
unsigned  Opc,
uint64_t Imm 
)
static

Definition at line 2183 of file AArch64ISelLowering.cpp.

References isIntImmediate(), and N.

◆ isOperandOfVmullHighP64()

static bool isOperandOfVmullHighP64 ( Value Op)
static

◆ isOrXorChain()

static bool isOrXorChain ( SDValue  N,
unsigned Num,
SmallVector< std::pair< SDValue, SDValue >, 16 > &  WorkList 
)
static

◆ isPackedVectorType()

static bool isPackedVectorType ( EVT  VT,
SelectionDAG DAG 
)
inlinestatic

Returns true if VT's elements occupy the lowest bit positions of its associated register class without any intervening space.

For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the same register class, but only nxv8f16 can be treated as a packed vector.
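
A sketch of the criterion for scalable types, inferred from the referenced helpers rather than copied from the source (TLI is an assumed TargetLoweringBase reference):

// A legal scalable vector is "packed" when its minimum size fills a whole
// 128-bit SVE block; unpacked types such as nxv2f16 or nxv4f16 leave gaps.
bool Packed = VT.isScalableVector() && TLI.isTypeLegal(VT) &&
              VT.getSizeInBits().getKnownMinValue() == llvm::AArch64::SVEBitsPerBlock;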

Definition at line 234 of file AArch64ISelLowering.cpp.

References assert(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::getKnownMinValue(), llvm::EVT::getSizeInBits(), llvm::SelectionDAG::getTargetLoweringInfo(), llvm::EVT::isFixedLengthVector(), llvm::TargetLoweringBase::isTypeLegal(), llvm::EVT::isVector(), and llvm::AArch64::SVEBitsPerBlock.

◆ isPassedInFPR()

static bool isPassedInFPR ( EVT  VT)
static

◆ isPow2Splat()

static bool isPow2Splat ( SDValue  Op,
uint64_t SplatVal,
bool Negated 
)
static

◆ isPredicateCCSettingOp()

static bool isPredicateCCSettingOp ( SDValue  N)
static

◆ isSetCC()

static bool isSetCC ( SDValue  Op,
SetCCInfoAndKind SetCCInfo 
)
static

Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one.

SetCCInfo is filled accordingly.

Postcondition
SetCCInfo is meaningful only when this function returns true.
Returns
True when Op is a kind of SET_CC operation.
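
A minimal caller sketch (hypothetical surrounding code), based only on the description above:

SetCCInfoAndKind Info;
if (isSetCC(Op, Info)) {
  // Info now describes either a generic ISD::SETCC (two operands plus an
  // ISD::CondCode) or an AArch64 CSEL of 0/1 (compare node plus an
  // AArch64CC::CondCode), as indicated by its kind flag.
}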

Definition at line 20230 of file AArch64ISelLowering.cpp.

References SetCCInfo::AArch64, GenericSetCCInfo::CC, AArch64SetCCInfo::CC, AArch64SetCCInfo::Cmp, llvm::AArch64ISD::CSEL, SetCCInfo::Generic, llvm::AArch64CC::getInvertedCondCode(), llvm::ConstantSDNode::isOne(), llvm::ConstantSDNode::isZero(), GenericSetCCInfo::Opnd0, GenericSetCCInfo::Opnd1, llvm::ISD::SETCC, and std::swap().

Referenced by isSetCCOrZExtSetCC().

◆ isSetCCOrZExtSetCC()

static bool isSetCCOrZExtSetCC ( const SDValue Op,
SetCCInfoAndKind Info 
)
static

Definition at line 20273 of file AArch64ISelLowering.cpp.

References Info, isSetCC(), and llvm::ISD::ZERO_EXTEND.

Referenced by performSetccAddFolding().

◆ isSignExtended()

static bool isSignExtended ( SDValue  N,
SelectionDAG DAG 
)
static

◆ isSingletonEXTMask()

static bool isSingletonEXTMask ( ArrayRef< int >  M,
EVT  VT,
unsigned Imm 
)
static

Definition at line 12491 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements().

◆ isSplatShuffle()

static bool isSplatShuffle ( Value V)
static

◆ IsSVECntIntrinsic()

static std::optional< unsigned > IsSVECntIntrinsic ( SDValue  S)
static

Definition at line 18473 of file AArch64ISelLowering.cpp.

References getIntrinsicID(), and llvm::SDValue::getNode().

Referenced by performMulCombine().

◆ isTRN_v_undef_Mask()

static bool isTRN_v_undef_Mask ( ArrayRef< int >  M,
EVT  VT,
unsigned WhichResult 
)
static

isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".

Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.

Definition at line 12727 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements().

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isUZP_v_undef_Mask()

static bool isUZP_v_undef_Mask ( ArrayRef< int >  M,
EVT  VT,
unsigned WhichResult 
)
static

isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".

Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.

Definition at line 12708 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements(), and Idx.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isValidImmForSVEVecImmAddrMode() [1/2]

static bool isValidImmForSVEVecImmAddrMode ( SDValue  Offset,
unsigned  ScalarSizeInBytes 
)
static

Check if the value of Offset represents a valid immediate for the SVE gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode:

 [<Zn>.[S|D]{, #<imm>}]

where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.

Definition at line 24982 of file AArch64ISelLowering.cpp.

References llvm::ConstantSDNode::getZExtValue(), isValidImmForSVEVecImmAddrMode(), and llvm::Offset.

◆ isValidImmForSVEVecImmAddrMode() [2/2]

static bool isValidImmForSVEVecImmAddrMode ( unsigned  OffsetInBytes,
unsigned  ScalarSizeInBytes 
)
inlinestatic

Check if the value of OffsetInBytes can be used as an immediate for the gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode:

 [<Zn>.[S|D]{, #<imm>}]

where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
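
A sketch written directly from that formula (the helper name is invented here; this is an assumption about shape, not the verbatim implementation):

static bool isValidSVEVecImm(unsigned OffsetInBytes, unsigned ScalarSizeInBytes) {
  // <imm> must be sizeof(<T>) * k with k in [0, 31].
  if (OffsetInBytes % ScalarSizeInBytes != 0)
    return false;
  return OffsetInBytes / ScalarSizeInBytes <= 31;
}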

Definition at line 24962 of file AArch64ISelLowering.cpp.

Referenced by combineSVEPrefetchVecBaseImmOff(), isValidImmForSVEVecImmAddrMode(), performGatherLoadCombine(), and performScatterStoreCombine().

◆ isVShiftLImm()

static bool isVShiftLImm ( SDValue  Op,
EVT  VT,
bool  isLong,
int64_t &  Cnt 
)
static

isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left operation.

That value must be in the range: 0 <= Value < ElementBits for a left shift; or 0 <= Value <= ElementBits for a long left shift.
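
Worked example with hypothetical values (DAG and DL assumed from surrounding code): for v4i32 the element width is 32, so a splatted 5 is a valid left-shift immediate, while 32 is accepted only in the long-shift form.

int64_t Cnt;
bool Shl  = isVShiftLImm(DAG.getConstant(5, DL, MVT::v4i32), MVT::v4i32,
                         /*isLong=*/false, Cnt); // true, Cnt == 5
bool Shll = isVShiftLImm(DAG.getConstant(32, DL, MVT::v4i32), MVT::v4i32,
                         /*isLong=*/true, Cnt);  // true, Cnt == 32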

Definition at line 15069 of file AArch64ISelLowering.cpp.

References assert(), llvm::EVT::getScalarSizeInBits(), getVShiftImm(), and llvm::EVT::isVector().

Referenced by LowerShift(), llvm::ARMTargetLowering::PerformIntrinsicCombine(), and PerformShiftCombine().

◆ isVShiftRImm()

static bool isVShiftRImm ( SDValue  Op,
EVT  VT,
bool  isNarrow,
int64_t &  Cnt 
)
static

isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation.

The value must be in the range: 1 <= Value <= ElementBits for a right shift; or 1 <= Value <= ElementBits/2 for a narrow right shift.

Definition at line 15080 of file AArch64ISelLowering.cpp.

References assert(), llvm::EVT::getScalarSizeInBits(), getVShiftImm(), and llvm::EVT::isVector().

Referenced by LowerShift(), llvm::ARMTargetLowering::PerformIntrinsicCombine(), and PerformShiftCombine().

◆ isWideDUPMask()

static bool isWideDUPMask ( ArrayRef< int >  M,
EVT  VT,
unsigned  BlockSize,
unsigned DupLaneOp 
)
static

Check if a vector shuffle corresponds to a DUP instruction with a larger element width than the vector lane type.

If that is the case, the function returns true and writes the value of the DUP instruction lane operand into DupLaneOp.
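
An illustrative mask (values chosen here, not taken from the source):

// An 8 x i8 shuffle whose mask repeats byte lanes {2, 3} duplicates one 16-bit
// block, so with BlockSize = 16 this should be recognised as a wide DUP of
// 16-bit lane 1.
int Mask[] = {2, 3, 2, 3, 2, 3, 2, 3};
unsigned DupLaneOp;
bool IsWideDup = isWideDUPMask(Mask, MVT::v8i8, /*BlockSize=*/16, DupLaneOp);
// expected: IsWideDup == true, DupLaneOp == 1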

Definition at line 12574 of file AArch64ISelLowering.cpp.

References assert(), llvm::SmallVectorTemplateCommon< T, typename >::begin(), BlockSize, llvm::SmallVectorTemplateCommon< T, typename >::end(), llvm::find_if(), llvm::EVT::getScalarSizeInBits(), llvm::EVT::getSizeInBits(), llvm::EVT::getVectorNumElements(), and I.

◆ isWideTypeMask()

static bool isWideTypeMask ( ArrayRef< int >  M,
EVT  VT,
SmallVectorImpl< int > &  NewMask 
)
static

◆ isZeroExtended()

static bool isZeroExtended ( SDValue  N,
SelectionDAG DAG 
)
static

◆ isZeroingInactiveLanes()

static bool isZeroingInactiveLanes ( SDValue  Op)
static

◆ isZerosVector()

static bool isZerosVector ( const SDNode N)
static

◆ isZIP_v_undef_Mask()

static bool isZIP_v_undef_Mask ( ArrayRef< int >  M,
EVT  VT,
unsigned WhichResult 
)
static

isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".

Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.

Definition at line 12689 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements(), and Idx.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ legalizeSVEGatherPrefetchOffsVec()

static SDValue legalizeSVEGatherPrefetchOffsVec ( SDNode N,
SelectionDAG DAG 
)
static

Legalize the gather prefetch (scalar + vector addressing mode) when the offset vector is an unpacked 32-bit scalable vector.

The other cases (Offset != nxv2i32) do not need legalization.
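
A sketch of the widening step, inferred from the referenced nodes (the offset operand index is hypothetical):

SDValue Offset = N->getOperand(OffsetOpIdx); // OffsetOpIdx: assumed position
if (Offset.getValueType() == MVT::nxv2i32) {
  SDLoc DL(N);
  Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset);
  // ...then rebuild the prefetch node with the widened offset operand...
}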

Definition at line 25341 of file AArch64ISelLowering.cpp.

References llvm::ISD::ANY_EXTEND, DL, llvm::SelectionDAG::getNode(), llvm::SelectionDAG::getVTList(), N, and llvm::Offset.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ lookThroughSignExtension()

std::pair< SDValue, uint64_t > lookThroughSignExtension ( SDValue  Val)

◆ lowerADDSUBO_CARRY()

static SDValue lowerADDSUBO_CARRY ( SDValue  Op,
SelectionDAG DAG,
unsigned  Opcode,
bool  IsSigned 
)
static

◆ LowerBRCOND()

static SDValue LowerBRCOND ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerFLDEXP()

static SDValue LowerFLDEXP ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerFunnelShift()

static SDValue LowerFunnelShift ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerPREFETCH()

static SDValue LowerPREFETCH ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerSMELdrStr()

SDValue LowerSMELdrStr ( SDValue  N,
SelectionDAG DAG,
bool  IsLoad 
)

◆ LowerSVEIntrinsicDUP()

static SDValue LowerSVEIntrinsicDUP ( SDNode N,
SelectionDAG DAG 
)
static

◆ LowerSVEIntrinsicEXT()

static SDValue LowerSVEIntrinsicEXT ( SDNode N,
SelectionDAG DAG 
)
static

◆ LowerSVEIntrinsicIndex()

static SDValue LowerSVEIntrinsicIndex ( SDNode N,
SelectionDAG DAG 
)
static

◆ LowerTruncateVectorStore()

static SDValue LowerTruncateVectorStore ( SDLoc  DL,
StoreSDNode ST,
EVT  VT,
EVT  MemVT,
SelectionDAG DAG 
)
static

◆ LowerXALUO()

static SDValue LowerXALUO ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ mayTailCallThisCC()

static bool mayTailCallThisCC ( CallingConv::ID  CC)
static

◆ NormalizeBuildVector()

static SDValue NormalizeBuildVector ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ optimizeIncrementingWhile()

static SDValue optimizeIncrementingWhile ( SDValue  Op,
SelectionDAG DAG,
bool  IsSigned,
bool  IsEqual 
)
static

◆ optimizeLogicalImm()

static bool optimizeLogicalImm ( SDValue  Op,
unsigned  Size,
uint64_t  Imm,
const APInt Demanded,
TargetLowering::TargetLoweringOpt TLO,
unsigned  NewOpc 
)
static

◆ overflowFlagToValue()

static SDValue overflowFlagToValue ( SDValue  Glue,
EVT  VT,
SelectionDAG DAG 
)
static

◆ parseConstraintCode()

static AArch64CC::CondCode parseConstraintCode ( llvm::StringRef  Constraint)
static

◆ parsePredicateConstraint()

static std::optional< PredicateConstraint > parsePredicateConstraint ( StringRef  Constraint)
static

◆ parseReducedGprConstraint()

static std::optional< ReducedGprConstraint > parseReducedGprConstraint ( StringRef  Constraint)
static

◆ performAddCombineForShiftedOperands()

static SDValue performAddCombineForShiftedOperands ( SDNode N,
SelectionDAG DAG 
)
static

◆ performAddCombineSubShift()

static SDValue performAddCombineSubShift ( SDNode N,
SDValue  SUB,
SDValue  Z,
SelectionDAG DAG 
)
static

◆ performAddCSelIntoCSinc()

static SDValue performAddCSelIntoCSinc ( SDNode N,
SelectionDAG DAG 
)
static

◆ performAddDotCombine()

static SDValue performAddDotCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performAddSubCombine()

static SDValue performAddSubCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performAddSubIntoVectorOp()

static SDValue performAddSubIntoVectorOp ( SDNode N,
SelectionDAG DAG 
)
static

◆ performAddSubLongCombine()

static SDValue performAddSubLongCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performAddUADDVCombine()

static SDValue performAddUADDVCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performANDCombine()

static SDValue performANDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performANDORCSELCombine()

static SDValue performANDORCSELCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performANDSETCCCombine()

static SDValue performANDSETCCCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performBRCONDCombine()

static SDValue performBRCONDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performBSPExpandForSVE()

static SDValue performBSPExpandForSVE ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performBuildShuffleExtendCombine()

static SDValue performBuildShuffleExtendCombine ( SDValue  BV,
SelectionDAG DAG 
)
static

◆ performBuildVectorCombine()

static SDValue performBuildVectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performConcatVectorsCombine()

static SDValue performConcatVectorsCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

Definition at line 19757 of file AArch64ISelLowering.cpp.

References llvm::Add, llvm::ISD::ADD, llvm::all_of(), assert(), llvm::ISD::BITCAST, CC, llvm::ISD::CONCAT_VECTORS, llvm::TargetLowering::DAGCombinerInfo::DAG, llvm::dbgs(), llvm::AArch64ISD::DUP, llvm::AArch64ISD::DUPLANE64, llvm::SelectionDAG::getBitcast(), llvm::SelectionDAG::getBuildVector(), llvm::SelectionDAG::getConstant(), llvm::SDValue::getConstantOperandVal(), llvm::SDNode::getConstantOperandVal(), llvm::SelectionDAG::getContext(), llvm::SelectionDAG::getLoad(), llvm::SDValue::getNode(), llvm::SelectionDAG::getNode(), llvm::SelectionDAG::getNOT(), llvm::SDValue::getOpcode(), llvm::SDNode::getOpcode(), llvm::SDValue::getOperand(), llvm::SDNode::getOperand(), llvm::EVT::getScalarSizeInBits(), llvm::SelectionDAG::getTargetLoweringInfo(), llvm::SelectionDAG::getUNDEF(), llvm::SDValue::getValue(), llvm::SDValue::getValueType(), llvm::MVT::getVectorElementType(), llvm::EVT::getVectorNumElements(), llvm::MVT::getVectorNumElements(), llvm::SelectionDAG::getVectorShuffle(), llvm::EVT::getVectorVT(), llvm::MVT::getVectorVT(), llvm::SDNode::hasOneUse(), llvm::EVT::is128BitVector(), llvm::TargetLowering::DAGCombinerInfo::isBeforeLegalizeOps(), llvm::TargetLoweringBase::isBinOp(), llvm::ISD::isConstantSplatVectorAllOnes(), llvm::SDNode::isOnlyUserOf(), llvm::EVT::isScalableVector(), llvm::SDValue::isUndef(), llvm::MVT::isVector(), LLVM_DEBUG, llvm::AArch64ISD::MOVIshift, N, llvm::SmallVectorTemplateBase< T, bool >::push_back(), llvm::SelectionDAG::ReplaceAllUsesOfValueWith(), RHS, llvm::ISD::TRUNCATE, llvm::AArch64ISD::UZP2, llvm::AArch64ISD::VLSHR, WidenVector(), X, llvm::ISD::XOR, Y, llvm::AArch64ISD::ZIP1, and llvm::AArch64ISD::ZIP2.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performCONDCombine()

static SDValue performCONDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
unsigned  CCIndex,
unsigned  CmpIndex 
)
static

◆ performCSELCombine()

static SDValue performCSELCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performCTLZCombine()

static SDValue performCTLZCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performDUPCombine()

static SDValue performDUPCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performDupLane128Combine()

static SDValue performDupLane128Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performExtBinopLoadFold()

static SDValue performExtBinopLoadFold ( SDNode N,
SelectionDAG DAG 
)
static

◆ performExtendCombine()

static SDValue performExtendCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performExtractSubvectorCombine()

static SDValue performExtractSubvectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performExtractVectorEltCombine()

static SDValue performExtractVectorEltCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performFADDCombine()

static SDValue performFADDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performFirstTrueTestVectorCombine()

static SDValue performFirstTrueTestVectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performFlagSettingCombine()

static SDValue performFlagSettingCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
unsigned  GenericOpcode 
)
static

◆ performFPExtendCombine()

static SDValue performFPExtendCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performFpToIntCombine()

static SDValue performFpToIntCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performGatherLoadCombine()

static SDValue performGatherLoadCombine ( SDNode N,
SelectionDAG DAG,
unsigned  Opcode,
bool  OnlyPackedOffsets = true 
)
static

◆ performGLD1Combine()

static SDValue performGLD1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performGlobalAddressCombine()

static SDValue performGlobalAddressCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget,
const TargetMachine TM 
)
static

◆ performInsertSubvectorCombine()

static SDValue performInsertSubvectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performInsertVectorEltCombine()

static SDValue performInsertVectorEltCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performIntrinsicCombine()

static SDValue performIntrinsicCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

Definition at line 21724 of file AArch64ISelLowering.cpp.

References llvm::ISD::ABDS, llvm::ISD::ABDU, llvm::ISD::ADD, llvm::ISD::AND, llvm::AArch64ISD::ANDV_PRED, llvm::AArch64CC::ANY_ACTIVE, llvm::AArch64ISD::BIC, combineAcrossLanesIntrinsic(), combineSVEReductionFP(), combineSVEReductionInt(), combineSVEReductionOrderedFP(), convertMergedOpToPredOp(), llvm::TargetLowering::DAGCombinerInfo::DAG, llvm::AArch64ISD::EORV_PRED, llvm::AArch64ISD::FADD_PRED, llvm::AArch64ISD::FADDA_PRED, llvm::AArch64ISD::FADDV_PRED, llvm::AArch64ISD::FDIV_PRED, llvm::AArch64CC::FIRST_ACTIVE, llvm::AArch64ISD::FMA_PRED, llvm::AArch64ISD::FMAX_PRED, llvm::ISD::FMAXIMUM, llvm::AArch64ISD::FMAXNM_PRED, llvm::AArch64ISD::FMAXNMV_PRED, llvm::ISD::FMAXNUM, llvm::AArch64ISD::FMAXV_PRED, llvm::AArch64ISD::FMIN_PRED, llvm::ISD::FMINIMUM, llvm::AArch64ISD::FMINNM_PRED, llvm::AArch64ISD::FMINNMV_PRED, llvm::ISD::FMINNUM, llvm::AArch64ISD::FMINV_PRED, llvm::AArch64ISD::FMUL_PRED, llvm::AArch64ISD::FSUB_PRED, llvm::SelectionDAG::getCondCode(), getIntrinsicID(), llvm::SelectionDAG::getNode(), getPTest(), llvm::AArch64CC::LAST_ACTIVE, LowerSVEIntrinsicDUP(), LowerSVEIntrinsicEXT(), LowerSVEIntrinsicIndex(), llvm::AArch64ISD::MUL_PRED, llvm::AArch64ISD::MULHS_PRED, llvm::AArch64ISD::MULHU_PRED, N, llvm::ISD::OR, llvm::AArch64ISD::ORV_PRED, llvm::AArch64ISD::PMULL, llvm::ISD::SADDSAT, llvm::AArch64ISD::SADDV, llvm::AArch64ISD::SADDV_PRED, llvm::AArch64ISD::SDIV_PRED, llvm::AArch64ISD::SETCC_MERGE_ZERO, llvm::ISD::SETEQ, llvm::ISD::SETGE, llvm::ISD::SETGT, llvm::ISD::SETLE, llvm::ISD::SETLT, llvm::ISD::SETNE, llvm::ISD::SETUGE, llvm::ISD::SETUGT, llvm::ISD::SETULE, llvm::ISD::SETULT, llvm::ISD::SETUO, llvm::AArch64ISD::SHL_PRED, llvm::AArch64ISD::SMAX_PRED, llvm::AArch64ISD::SMAXV, llvm::AArch64ISD::SMAXV_PRED, llvm::AArch64ISD::SMIN_PRED, llvm::AArch64ISD::SMINV, llvm::AArch64ISD::SMINV_PRED, llvm::AArch64ISD::SMULL, llvm::ISD::SPLAT_VECTOR, llvm::AArch64ISD::SRA_PRED, llvm::AArch64ISD::SRAD_MERGE_OP1, llvm::AArch64ISD::SRL_PRED, llvm::ISD::SSUBSAT, llvm::ISD::SUB, tryCombineCRC32(), tryCombineFixedPointConvert(), tryCombineLongOpWithDup(), tryCombineShiftImm(), tryCombineWhileLo(), tryConvertSVEWideCompare(), llvm::ISD::UADDSAT, llvm::AArch64ISD::UADDV, llvm::AArch64ISD::UADDV_PRED, llvm::AArch64ISD::UDIV_PRED, llvm::AArch64ISD::UMAX_PRED, llvm::AArch64ISD::UMAXV, llvm::AArch64ISD::UMAXV_PRED, llvm::AArch64ISD::UMIN_PRED, llvm::AArch64ISD::UMINV, llvm::AArch64ISD::UMINV_PRED, llvm::AArch64ISD::UMULL, llvm::ISD::USUBSAT, llvm::ISD::VSELECT, and llvm::ISD::XOR.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performIntToFpCombine()

static SDValue performIntToFpCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performLastTrueTestVectorCombine()

static SDValue performLastTrueTestVectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performLD1Combine()

static SDValue performLD1Combine ( SDNode N,
SelectionDAG DAG,
unsigned  Opc 
)
static

◆ performLD1ReplicateCombine()

template<unsigned Opcode>
static SDValue performLD1ReplicateCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performLDNT1Combine()

static SDValue performLDNT1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performLOADCombine()

static SDValue performLOADCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performMaskedGatherScatterCombine()

static SDValue performMaskedGatherScatterCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performMSTORECombine()

static SDValue performMSTORECombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performMulCombine()

static SDValue performMulCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performMULLCombine()

static SDValue performMULLCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performMulVectorCmpZeroCombine()

static SDValue performMulVectorCmpZeroCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performMulVectorExtendCombine()

static SDValue performMulVectorExtendCombine ( SDNode Mul,
SelectionDAG DAG 
)
static

Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup)) making use of the vector SExt/ZExt rather than the scalar SExt/ZExt.
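
Schematically, with types picked only for illustration:

mul (v4i32 (dup (sext i16 %s to i32))), %v   -->   mul (v4i32 (sext (v4i16 (dup %s)))), %v

so the extension is performed once on the whole vector and the multiply can later be matched as a widening multiply.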

Definition at line 18602 of file AArch64ISelLowering.cpp.

References DL, llvm::SelectionDAG::getNode(), llvm::Mul, and performBuildShuffleExtendCombine().

Referenced by performMulCombine().

◆ performNegCSelCombine()

static SDValue performNegCSelCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performNEONPostLDSTCombine()

static SDValue performNEONPostLDSTCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performNVCASTCombine()

static SDValue performNVCASTCombine ( SDNode N,
SelectionDAG DAG 
)
static

Get rid of unnecessary NVCASTs (that don't change the type).
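
A sketch of the combine, assuming the shape implied by the description (not the verbatim code):

// NVCAST only reinterprets a value in the same register bank; when it does not
// change the value type it can be folded away entirely.
if (N->getValueType(0) == N->getOperand(0).getValueType())
  return N->getOperand(0);
return SDValue();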

Definition at line 24867 of file AArch64ISelLowering.cpp.

References llvm::SelectionDAG::getNode(), N, and llvm::AArch64ISD::NVCAST.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performORCombine()

static SDValue performORCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget,
const AArch64TargetLowering TLI 
)
static

◆ performOrXorChainCombine()

static SDValue performOrXorChainCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performPostLD1Combine()

static SDValue performPostLD1Combine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
bool  IsLaneOp 
)
static

◆ performReinterpretCastCombine()

static SDValue performReinterpretCastCombine ( SDNode N)
static

◆ performScalarToVectorCombine()

static SDValue performScalarToVectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performScatterStoreCombine()

static SDValue performScatterStoreCombine ( SDNode N,
SelectionDAG DAG,
unsigned  Opcode,
bool  OnlyPackedOffsets = true 
)
static

◆ performSelectCombine()

static SDValue performSelectCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performSetccAddFolding()

static SDValue performSetccAddFolding ( SDNode Op,
SelectionDAG DAG 
)
static

◆ performSETCCCombine()

static SDValue performSETCCCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performSetccMergeZeroCombine()

static SDValue performSetccMergeZeroCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performSetCCPunpkCombine()

static SDValue performSetCCPunpkCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performSignExtendInRegCombine()

static SDValue performSignExtendInRegCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

Definition at line 25209 of file AArch64ISelLowering.cpp.

References assert(), llvm::TargetLowering::DAGCombinerInfo::CombineTo(), DL, EnableCombineMGatherIntrinsics, llvm::SelectionDAG::getContext(), llvm::EVT::getDoubleNumVectorElementsVT(), llvm::SelectionDAG::getNode(), llvm::SDValue::getValue(), llvm::SDValue::getValueType(), llvm::SelectionDAG::getValueType(), llvm::EVT::getVectorElementType(), llvm::SelectionDAG::getVTList(), llvm::AArch64ISD::GLD1_IMM_MERGE_ZERO, llvm::AArch64ISD::GLD1_MERGE_ZERO, llvm::AArch64ISD::GLD1_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1S_IMM_MERGE_ZERO, llvm::AArch64ISD::GLD1S_MERGE_ZERO, llvm::AArch64ISD::GLD1S_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1S_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1S_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_IMM_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_IMM_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDNT1_MERGE_ZERO, llvm::AArch64ISD::GLDNT1S_MERGE_ZERO, I, llvm::TargetLowering::DAGCombinerInfo::isBeforeLegalizeOps(), llvm::AArch64ISD::LD1_MERGE_ZERO, llvm::AArch64ISD::LD1S_MERGE_ZERO, llvm::AArch64ISD::LDFF1_MERGE_ZERO, llvm::AArch64ISD::LDFF1S_MERGE_ZERO, llvm::AArch64ISD::LDNF1_MERGE_ZERO, llvm::AArch64ISD::LDNF1S_MERGE_ZERO, N, llvm::SmallVectorTemplateBase< T, bool >::push_back(), llvm::ISD::SIGN_EXTEND_INREG, llvm::AArch64ISD::SUNPKHI, llvm::AArch64ISD::SUNPKLO, llvm::AArch64ISD::UUNPKHI, and llvm::AArch64ISD::UUNPKLO.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performSignExtendSetCCCombine()

static SDValue performSignExtendSetCCCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performSpliceCombine()

static SDValue performSpliceCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performST1Combine()

static SDValue performST1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performSTNT1Combine()

static SDValue performSTNT1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performSTORECombine()

static SDValue performSTORECombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performSubAddMULCombine()

static SDValue performSubAddMULCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performSubsToAndsCombine()

static SDValue performSubsToAndsCombine ( SDNode N,
SDNode SubsNode,
SDNode AndNode,
SelectionDAG DAG,
unsigned  CCIndex,
unsigned  CmpIndex,
unsigned  CC 
)
static

◆ performSunpkloCombine()

static SDValue performSunpkloCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performSVEAndCombine()

static SDValue performSVEAndCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performSVEMulAddSubCombine()

static SDValue performSVEMulAddSubCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performTBISimplification()

static bool performTBISimplification ( SDValue  Addr,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performTBZCombine()

static SDValue performTBZCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performTruncateCombine()

static SDValue performTruncateCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performUADDVAddCombine()

static SDValue performUADDVAddCombine ( SDValue  A,
SelectionDAG DAG 
)
static

◆ performUADDVCombine()

static SDValue performUADDVCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performUADDVZextCombine()

static SDValue performUADDVZextCombine ( SDValue  A,
SelectionDAG DAG 
)
static

◆ performUnpackCombine()

static SDValue performUnpackCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performUzpCombine()

static SDValue performUzpCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performVecReduceAddCombine()

static SDValue performVecReduceAddCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget ST 
)
static

◆ performVecReduceAddCombineWithUADDLP()

static SDValue performVecReduceAddCombineWithUADDLP ( SDNode N,
SelectionDAG DAG 
)
static

◆ performVecReduceBitwiseCombine()

static SDValue performVecReduceBitwiseCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performVectorCompareAndMaskUnaryOpCombine()

static SDValue performVectorCompareAndMaskUnaryOpCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performVectorExtCombine()

static SDValue performVectorExtCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performVectorShiftCombine()

static SDValue performVectorShiftCombine ( SDNode N,
const AArch64TargetLowering TLI,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performVSelectCombine()

static SDValue performVSelectCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performXorCombine()

static SDValue performXorCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ ReconstructShuffleWithRuntimeMask()

SDValue ReconstructShuffleWithRuntimeMask ( SDValue  Op,
SelectionDAG DAG 
)

◆ ReconstructTruncateFromBuildVector()

static SDValue ReconstructTruncateFromBuildVector ( SDValue  V,
SelectionDAG DAG 
)
static

◆ removeRedundantInsertVectorElt()

static SDValue removeRedundantInsertVectorElt ( SDNode N)
static

◆ ReplaceAddWithADDP()

static void ReplaceAddWithADDP ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ ReplaceATOMIC_LOAD_128Results()

static void ReplaceATOMIC_LOAD_128Results ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ replaceBoolVectorBitcast()

static void replaceBoolVectorBitcast ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG 
)
static

◆ ReplaceCMP_SWAP_128Results()

static void ReplaceCMP_SWAP_128Results ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ ReplaceReductionResults()

static void ReplaceReductionResults ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
unsigned  InterOp,
unsigned  AcrossOp 
)
static

◆ replaceSplatVectorStore()

static SDValue replaceSplatVectorStore ( SelectionDAG DAG,
StoreSDNode St 
)
static

Replace a vector store whose value is a splat of a scalar with scalar stores of that scalar value.

The load/store optimizer pass will merge them into store-pair stores. This has better performance than a splat of the scalar followed by a split vector store. Even if the stores are not merged, it is four stores vs. a dup followed by an ext.b and two stores.
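
For instance (registers and types invented for illustration), a v4i32 store of a value splatted from w1 can be emitted as:

str w1, [x0]
str w1, [x0, #4]
str w1, [x0, #8]
str w1, [x0, #12]

which the load/store optimizer can then merge into two stp instructions.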

Definition at line 22369 of file AArch64ISelLowering.cpp.

References llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::StoreSDNode::getValue(), llvm::SDValue::getValueType(), llvm::EVT::getVectorNumElements(), llvm::ConstantSDNode::getZExtValue(), I, llvm::ISD::INSERT_VECTOR_ELT, llvm::EVT::isFloatingPoint(), llvm::StoreSDNode::isTruncatingStore(), and splitStoreSplat().

Referenced by splitStores().

◆ replaceZeroVectorStore()

static SDValue replaceZeroVectorStore ( SelectionDAG DAG,
StoreSDNode St 
)
static

Replace a vector store whose value is a splat of zeros with scalar stores of WZR/XZR.

The load/store optimizer pass will merge them into store-pair stores. This should be better than a movi to create the vector zero followed by a vector store if the zero constant is not re-used, since one instruction and one register live range will be removed.

For example, the final generated code should be:

stp xzr, xzr, [x0]

instead of:

movi v0.2d, #0
str q0, [x0]

Definition at line 22302 of file AArch64ISelLowering.cpp.

References llvm::ISD::BUILD_VECTOR, DL, llvm::StoreSDNode::getBasePtr(), llvm::SDNode::getConstantOperandVal(), llvm::SelectionDAG::getCopyFromReg(), llvm::SelectionDAG::getEntryNode(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::EVT::getSizeInBits(), llvm::StoreSDNode::getValue(), llvm::SDValue::getValueType(), llvm::EVT::getVectorElementType(), llvm::EVT::getVectorNumElements(), llvm::SDValue::hasOneUse(), I, llvm::SelectionDAG::isBaseWithConstantOffset(), llvm::isNullConstant(), llvm::isNullFPConstant(), llvm::EVT::isScalableVector(), llvm::StoreSDNode::isTruncatingStore(), llvm::Offset, and splitStoreSplat().

Referenced by splitStores().

◆ resolveBuildVector()

static bool resolveBuildVector ( BuildVectorSDNode BVN,
APInt CnstBits,
APInt UndefBits 
)
static

◆ selectUmullSmull()

static unsigned selectUmullSmull ( SDValue N0,
SDValue N1,
SelectionDAG DAG,
SDLoc  DL,
bool IsMLA 
)
static

◆ setInfoSVEStN()

template<unsigned NumVecs>
static bool setInfoSVEStN ( const AArch64TargetLowering TLI,
const DataLayout DL,
AArch64TargetLowering::IntrinsicInfo &  Info,
const CallInst CI 
)
static

◆ shouldSinkVectorOfPtrs()

static bool shouldSinkVectorOfPtrs ( Value Ptrs,
SmallVectorImpl< Use * > &  Ops 
)
static

◆ shouldSinkVScale()

static bool shouldSinkVScale ( Value Op,
SmallVectorImpl< Use * > &  Ops 
)
static

We want to sink the following cases: (add|sub|gep) A, ((mul|shl) vscale, imm); (add|sub|gep) A, vscale; (add|sub|gep) A, ((mul|shl) zext(vscale), imm).
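
A minimal sketch of those patterns using llvm::PatternMatch (an assumption about the shape of the check, not a copy of the implementation):

using namespace llvm::PatternMatch;
bool SinkableVScaleOperand =
    match(Op, m_VScale()) ||
    match(Op, m_Mul(m_VScale(), m_ConstantInt())) ||
    match(Op, m_Shl(m_VScale(), m_ConstantInt())) ||
    match(Op, m_Mul(m_ZExt(m_VScale()), m_ConstantInt())) ||
    match(Op, m_Shl(m_ZExt(m_VScale()), m_ConstantInt()));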

Definition at line 16265 of file AArch64ISelLowering.cpp.

References llvm::PatternMatch::m_ConstantInt(), llvm::PatternMatch::m_Mul(), llvm::PatternMatch::m_Shl(), llvm::PatternMatch::m_VScale(), llvm::PatternMatch::m_ZExt(), llvm::PatternMatch::match(), and llvm::SmallVectorTemplateBase< T, bool >::push_back().

Referenced by llvm::AArch64TargetLowering::shouldSinkOperands().

◆ skipExtensionForVectorMULL()

static SDValue skipExtensionForVectorMULL ( SDValue  N,
SelectionDAG DAG 
)
static

◆ splitStores()

static SDValue splitStores ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ splitStoreSplat()

static SDValue splitStoreSplat ( SelectionDAG DAG,
StoreSDNode St,
SDValue  SplatVal,
unsigned  NumVecElts 
)
static

◆ STATISTIC() [1/3]

STATISTIC ( NumOptimizedImms  ,
"Number of times immediates were optimized"   
)

◆ STATISTIC() [2/3]

STATISTIC ( NumShiftInserts  ,
"Number of vector shift inserts"   
)

◆ STATISTIC() [3/3]

STATISTIC ( NumTailCalls  ,
"Number of tail calls"   
)

◆ tryAdvSIMDModImm16()

static SDValue tryAdvSIMDModImm16 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits,
const SDValue LHS = nullptr 
)
static

◆ tryAdvSIMDModImm32()

static SDValue tryAdvSIMDModImm32 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits,
const SDValue LHS = nullptr 
)
static

◆ tryAdvSIMDModImm321s()

static SDValue tryAdvSIMDModImm321s ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryAdvSIMDModImm64()

static SDValue tryAdvSIMDModImm64 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryAdvSIMDModImm8()

static SDValue tryAdvSIMDModImm8 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryAdvSIMDModImmFP()

static SDValue tryAdvSIMDModImmFP ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryCombineCRC32()

static SDValue tryCombineCRC32 ( unsigned  Mask,
SDNode N,
SelectionDAG DAG 
)
static

◆ tryCombineExtendRShTrunc()

static SDValue tryCombineExtendRShTrunc ( SDNode N,
SelectionDAG DAG 
)
static

◆ tryCombineFixedPointConvert()

static SDValue tryCombineFixedPointConvert ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryCombineLongOpWithDup()

static SDValue tryCombineLongOpWithDup ( unsigned  IID,
SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryCombineMULLWithUZP1()

static SDValue tryCombineMULLWithUZP1 ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryCombineShiftImm()

static SDValue tryCombineShiftImm ( unsigned  IID,
SDNode N,
SelectionDAG DAG 
)
static

◆ tryCombineToBSL()

static SDValue tryCombineToBSL ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64TargetLowering TLI 
)
static

◆ tryCombineWhileLo()

static SDValue tryCombineWhileLo ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ tryConvertSVEWideCompare()

static SDValue tryConvertSVEWideCompare ( SDNode N,
ISD::CondCode  CC,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryExtendDUPToExtractHigh()

static SDValue tryExtendDUPToExtractHigh ( SDValue  N,
SelectionDAG DAG 
)
static

◆ tryFormConcatFromShuffle()

static SDValue tryFormConcatFromShuffle ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ tryGetOriginalBoolVectorType()

static EVT tryGetOriginalBoolVectorType ( SDValue  Op,
int  Depth = 0 
)
static

◆ tryLowerToSLI()

static SDValue tryLowerToSLI ( SDNode N,
SelectionDAG DAG 
)
static

◆ trySimplifySrlAddToRshrnb()

static SDValue trySimplifySrlAddToRshrnb ( SDValue  Srl,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ trySwapVSelectOperands()

static SDValue trySwapVSelectOperands ( SDNode N,
SelectionDAG DAG 
)
static

◆ tryToConvertShuffleOfTbl2ToTbl4()

static SDValue tryToConvertShuffleOfTbl2ToTbl4 ( SDValue  Op,
ArrayRef< int >  ShuffleMask,
SelectionDAG DAG 
)
static

◆ tryToWidenSetCCOperands()

static SDValue tryToWidenSetCCOperands ( SDNode Op,
SelectionDAG DAG 
)
static

◆ tryWhileWRFromOR()

SDValue tryWhileWRFromOR ( SDValue  Op,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)

Try to lower the construction of a pointer alias mask to a WHILEWR.

The mask's enabled lanes represent the elements that will not overlap across one loop iteration. This tries to match:

or (splat (setcc_lt (sub ptrA, ptrB), -(element_size - 1))),
   (get_active_lane_mask 0, (div (sub ptrA, ptrB), element_size))
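
As a hypothetical source-level motivation (not taken from the file), this is the shape of loop whose vectorized runtime alias check produces such a mask:

// dst and src may alias; the vectorizer emits a per-iteration alias mask that
// this lowering can turn into a single WHILEWR instruction.
void scale(char *dst, const char *src, long n) {
  for (long i = 0; i < n; ++i)
    dst[i] = src[i] + 1;
}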

Definition at line 13941 of file AArch64ISelLowering.cpp.

References llvm::Add, llvm::ISD::ADD, Cond, DL, llvm::SelectionDAG::getConstant(), llvm::SDValue::getConstantOperandVal(), llvm::SelectionDAG::getNode(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::SDValue::getValueType(), llvm::ISD::INTRINSIC_WO_CHAIN, llvm::isNullConstant(), llvm::isPowerOf2_64(), llvm::Log2_64(), Select, llvm::ISD::SELECT_CC, llvm::ISD::SETCC, llvm::ISD::SETLT, llvm::Splat, llvm::ISD::SPLAT_VECTOR, llvm::ISD::SRA, llvm::ISD::SRL, llvm::ISD::SUB, and std::swap().

◆ tryWidenMaskForShuffle()

static SDValue tryWidenMaskForShuffle ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ UseTlsOffset()

static Value * UseTlsOffset ( IRBuilderBase IRB,
unsigned  Offset 
)
static

◆ valueToCarryFlag()

static SDValue valueToCarryFlag ( SDValue  Value,
SelectionDAG DAG,
bool  Invert 
)
static

◆ vectorToScalarBitmask()

static SDValue vectorToScalarBitmask ( SDNode N,
SelectionDAG DAG 
)
static

◆ WidenVector()

static SDValue WidenVector ( SDValue  V64Reg,
SelectionDAG DAG 
)
static

Variable Documentation

◆ EnableAArch64ELFLocalDynamicTLSGeneration

cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))

◆ EnableCombineMGatherIntrinsics

cl::opt< bool > EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden, cl::desc("Combine extends of AArch64 masked gather intrinsics"), cl::init(true))
static

◆ EnableExtToTBL

cl::opt< bool > EnableExtToTBL("aarch64-enable-ext-to-tbl", cl::Hidden, cl::desc("Combine ext and trunc to TBL"), cl::init(true))
static

◆ EnableOptimizeLogicalImm

cl::opt< bool > EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden, cl::desc("Enable AArch64 logical imm instruction optimization"), cl::init(true))
static

◆ EnableSVEGISel

cl::opt< bool > EnableSVEGISel("aarch64-enable-gisel-sve", cl::Hidden, cl::desc("Enable / disable SVE scalable vectors in Global ISel"), cl::init(false))

◆ FPRArgRegs

const MCPhysReg FPRArgRegs[]
static
Initial value:
= {AArch64::Q0, AArch64::Q1, AArch64::Q2,
AArch64::Q3, AArch64::Q4, AArch64::Q5,
AArch64::Q6, AArch64::Q7}

Definition at line 164 of file AArch64ISelLowering.cpp.

Referenced by llvm::AArch64::getFPRArgRegs().

◆ GPRArgRegs

const MCPhysReg GPRArgRegs[]
static
Initial value:
= {AArch64::X0, AArch64::X1, AArch64::X2,
AArch64::X3, AArch64::X4, AArch64::X5,
AArch64::X6, AArch64::X7}

Definition at line 161 of file AArch64ISelLowering.cpp.

Referenced by f64AssignAAPCS(), and llvm::AArch64::getGPRArgRegs().

◆ MaxXors

cl::opt< unsigned > MaxXors("aarch64-max-xors", cl::init(16), cl::Hidden, cl::desc("Maximum of xors"))
static

Referenced by isOrXorChain().

◆ MVT_CC

const MVT MVT_CC = MVT::i32
static

Value type used for condition codes.

Definition at line 159 of file AArch64ISelLowering.cpp.

Referenced by emitComparison(), emitConditionalComparison(), getAArch64Cmp(), getAArch64XALUOOp(), and performANDORCSELCombine().