LLVM  14.0.0git
llvm::AArch64InstrInfo Class Reference (final)

#include "Target/AArch64/AArch64InstrInfo.h"

Inheritance diagram for llvm::AArch64InstrInfo (graph omitted).
Collaboration diagram for llvm::AArch64InstrInfo (graph omitted).

Public Member Functions

 AArch64InstrInfo (const AArch64Subtarget &STI)
 
const AArch64RegisterInfo & getRegisterInfo () const
 getRegisterInfo - TargetInstrInfo is a superset of MRegister info. More...
 
unsigned getInstSizeInBytes (const MachineInstr &MI) const override
 GetInstSize - Return the number of bytes of code the specified instruction may be. More...
 
bool isAsCheapAsAMove (const MachineInstr &MI) const override
 
bool isCoalescableExtInstr (const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
 
bool areMemAccessesTriviallyDisjoint (const MachineInstr &MIa, const MachineInstr &MIb) const override
 
unsigned isLoadFromStackSlot (const MachineInstr &MI, int &FrameIndex) const override
 
unsigned isStoreToStackSlot (const MachineInstr &MI, int &FrameIndex) const override
 
bool isCandidateToMergeOrPair (const MachineInstr &MI) const
 Return true if this is a load/store that can be potentially paired/merged. More...
 
Optional< ExtAddrMode > getAddrModeFromMemoryOp (const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
 
bool getMemOperandsWithOffsetWidth (const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, const TargetRegisterInfo *TRI) const override
 
bool getMemOperandWithOffsetWidth (const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, const TargetRegisterInfo *TRI) const
 If OffsetIsScalable is set to 'true', the offset is scaled by vscale. More...
 
MachineOperand & getMemOpBaseRegImmOfsOffsetOperand (MachineInstr &LdSt) const
 Return the immediate offset of the base register in a load/store LdSt. More...
 
bool shouldClusterMemOps (ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2, unsigned NumLoads, unsigned NumBytes) const override
 Detect opportunities for ldp/stp formation. More...
 
void copyPhysRegTuple (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, llvm::ArrayRef< unsigned > Indices) const
 
void copyGPRRegTuple (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode, unsigned ZeroReg, llvm::ArrayRef< unsigned > Indices) const
 
void copyPhysReg (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
 
void storeRegToStackSlot (MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
 
void loadRegFromStackSlot (MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
 
bool isSubregFoldable () const override
 
MachineInstr * foldMemoryOperandImpl (MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
 
bool isBranchOffsetInRange (unsigned BranchOpc, int64_t BrOffset) const override
 
MachineBasicBlock * getBranchDestBlock (const MachineInstr &MI) const override
 
bool analyzeBranch (MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
 
bool analyzeBranchPredicate (MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify) const override
 
unsigned removeBranch (MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
 
unsigned insertBranch (MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
 
bool reverseBranchCondition (SmallVectorImpl< MachineOperand > &Cond) const override
 
bool canInsertSelect (const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
 
void insertSelect (MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
 
MCInst getNop () const override
 
bool isSchedulingBoundary (const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
 
bool analyzeCompare (const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
 analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2, and the value it compares against in CmpValue. More...
 
bool optimizeCompareInstr (MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
 optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register. More...
 
bool optimizeCondBranch (MachineInstr &MI) const override
 Replace a csinc-branch sequence by a simple conditional branch. More...
 
bool isThroughputPattern (MachineCombinerPattern Pattern) const override
 Return true when a code sequence can improve throughput. More...
 
bool getMachineCombinerPatterns (MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const override
 Return true when there is potentially a faster code sequence for an instruction chain ending in Root. More...
 
bool isAssociativeAndCommutative (const MachineInstr &Inst) const override
 Return true when Inst is associative and commutative so that it can be reassociated. More...
 
void genAlternativeCodeSequence (MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
 When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could replace the original code sequence. More...
 
bool useMachineCombiner () const override
 AArch64 supports MachineCombiner. More...
 
bool expandPostRAPseudo (MachineInstr &MI) const override
 
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags (unsigned TF) const override
 
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags () const override
 
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags () const override
 
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags () const override
 
bool isFunctionSafeToOutlineFrom (MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
 
outliner::OutlinedFunction getOutliningCandidateInfo (std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
 
outliner::InstrType getOutliningType (MachineBasicBlock::iterator &MIT, unsigned Flags) const override
 
bool isMBBSafeToOutlineFrom (MachineBasicBlock &MBB, unsigned &Flags) const override
 
void buildOutlinedFrame (MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
 
MachineBasicBlock::iterator insertOutlinedCall (Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, const outliner::Candidate &C) const override
 
bool shouldOutlineFromFunctionByDefault (MachineFunction &MF) const override
 
uint64_t getElementSizeForOpcode (unsigned Opc) const
 Returns the vector element size (B, H, S or D) of an SVE opcode. More...
 
bool isPTestLikeOpcode (unsigned Opc) const
 Returns true if the opcode is for an SVE instruction that sets the condition codes as if its results had been fed to a PTEST instruction along with the same general predicate. More...
 
bool isWhileOpcode (unsigned Opc) const
 Returns true if the opcode is for an SVE WHILE## instruction. More...
 
Optional< RegImmPair > isAddImmediate (const MachineInstr &MI, Register Reg) const override
 
Optional< ParamLoadedValue > describeLoadedValue (const MachineInstr &MI, Register Reg) const override
 
unsigned int getTailDuplicateSize (CodeGenOpt::Level OptLevel) const override
 
bool isExtendLikelyToBeFolded (MachineInstr &ExtMI, MachineRegisterInfo &MRI) const override
 
virtual MachineInstr * foldMemoryOperandImpl (MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
 Target-dependent implementation for foldMemoryOperand. More...
 
virtual MachineInstr * foldMemoryOperandImpl (MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
 Target-dependent implementation for foldMemoryOperand. More...
 

Static Public Member Functions

static bool isGPRZero (const MachineInstr &MI)
 Does this instruction set its full destination register to zero? More...
 
static bool isGPRCopy (const MachineInstr &MI)
 Does this instruction rename a GPR without modifying bits? More...
 
static bool isFPRCopy (const MachineInstr &MI)
 Does this instruction rename an FPR without modifying bits? More...
 
static bool isLdStPairSuppressed (const MachineInstr &MI)
 Return true if pairing the given load or store is hinted to be unprofitable. More...
 
static bool isStridedAccess (const MachineInstr &MI)
 Return true if the given load or store is a strided memory access. More...
 
static bool hasUnscaledLdStOffset (unsigned Opc)
 Return true if it has an unscaled load/store offset. More...
 
static bool hasUnscaledLdStOffset (MachineInstr &MI)
 
static Optional< unsigned > getUnscaledLdSt (unsigned Opc)
 Returns the unscaled load/store for the scaled load/store opcode, if there is a corresponding unscaled variant available. More...
 
static int getMemScale (unsigned Opc)
 Scaling factor for (scaled or unscaled) load or store. More...
 
static int getMemScale (const MachineInstr &MI)
 
static bool isPreLd (const MachineInstr &MI)
 Returns whether the instruction is a pre-indexed load. More...
 
static bool isPreSt (const MachineInstr &MI)
 Returns whether the instruction is a pre-indexed store. More...
 
static bool isPreLdSt (const MachineInstr &MI)
 Returns whether the instruction is a pre-indexed load/store. More...
 
static unsigned getLoadStoreImmIdx (unsigned Opc)
 Returns the index for the immediate for a given instruction. More...
 
static bool isPairableLdStInst (const MachineInstr &MI)
 Return true if the given load or store may be paired with another. More...
 
static unsigned convertToFlagSettingOpc (unsigned Opc, bool &Is64Bit)
 Return the opcode that sets flags when possible. More...
 
static void suppressLdStPair (MachineInstr &MI)
 Hint that pairing the given load or store is unprofitable. More...
 
static bool getMemOpInfo (unsigned Opcode, TypeSize &Scale, unsigned &Width, int64_t &MinOffset, int64_t &MaxOffset)
 Returns true if opcode Opc is a memory operation. More...
 
static bool isFalkorShiftExtFast (const MachineInstr &MI)
 Returns true if the instruction has a shift by immediate that can be executed in one cycle less. More...
 
static bool isSEHInstruction (const MachineInstr &MI)
 Return true if the instruction is an SEH instruction used for unwinding on Windows. More...
 
static void decomposeStackOffsetForFrameOffsets (const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors, int64_t &NumDataVectors)
 Returns the offset in parts to which this frame offset can be decomposed for the purpose of describing a frame offset. More...
 
static void decomposeStackOffsetForDwarfOffsets (const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized)
 

Protected Member Functions

Optional< DestSourcePair > isCopyInstrImpl (const MachineInstr &MI) const override
 If the specific machine instruction is an instruction that moves/copies value from one register to another register return destination and source registers as machine operands. More...
 

Detailed Description

Definition at line 38 of file AArch64InstrInfo.h.

Constructor & Destructor Documentation

◆ AArch64InstrInfo()

AArch64InstrInfo::AArch64InstrInfo ( const AArch64Subtarget &  STI)
explicit

Definition at line 70 of file AArch64InstrInfo.cpp.

Member Function Documentation

◆ analyzeBranch()

bool AArch64InstrInfo::analyzeBranch ( MachineBasicBlock &  MBB,
MachineBasicBlock *&  TBB,
MachineBasicBlock *&  FBB,
SmallVectorImpl< MachineOperand > &  Cond,
bool  AllowModify = false 
) const
override

◆ analyzeBranchPredicate()

bool AArch64InstrInfo::analyzeBranchPredicate ( MachineBasicBlock &  MBB,
MachineBranchPredicate &  MBP,
bool  AllowModify 
) const
override

◆ analyzeCompare()

bool AArch64InstrInfo::analyzeCompare ( const MachineInstr &  MI,
Register &  SrcReg,
Register &  SrcReg2,
int64_t &  CmpMask,
int64_t &  CmpValue 
) const
override

analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2, and the value it compares against in CmpValue.

Return true if the comparison instruction can be analyzed.

Definition at line 1114 of file AArch64InstrInfo.cpp.

References assert(), llvm::AArch64_AM::decodeLogicalImmediate(), and MI.
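
A minimal sketch of how a caller might chain this hook with optimizeCompareInstr(); the helper name tryOptimizeCompare and the surrounding pass context are illustrative, not LLVM source:

  static bool tryOptimizeCompare(const AArch64InstrInfo &TII, MachineInstr &Cmp,
                                 const MachineRegisterInfo *MRI) {
    Register SrcReg, SrcReg2;
    int64_t CmpMask, CmpValue;
    // Bail out if the comparison cannot be analyzed.
    if (!TII.analyzeCompare(Cmp, SrcReg, SrcReg2, CmpMask, CmpValue))
      return false;
    // Try to fold the compare into a flag-setting def of SrcReg.
    return TII.optimizeCompareInstr(Cmp, SrcReg, SrcReg2, CmpMask, CmpValue, MRI);
  }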

◆ areMemAccessesTriviallyDisjoint()

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint ( const MachineInstr &  MIa,
const MachineInstr &  MIb 
) const
override

◆ buildOutlinedFrame()

void AArch64InstrInfo::buildOutlinedFrame ( MachineBasicBlock &  MBB,
MachineFunction &  MF,
const outliner::OutlinedFunction &  OF 
) const
override

◆ canInsertSelect()

bool AArch64InstrInfo::canInsertSelect ( const MachineBasicBlock &  MBB,
ArrayRef< MachineOperand >  Cond,
Register  DstReg,
Register  TrueReg,
Register  FalseReg,
int &  CondCycles,
int &  TrueCycles,
int &  FalseCycles 
) const
override

◆ convertToFlagSettingOpc()

unsigned AArch64InstrInfo::convertToFlagSettingOpc ( unsigned  Opc,
bool &  Is64Bit 
)
static

Return the opcode that sets flags when possible.

The caller is responsible for ensuring the opc has a flag setting equivalent.

Definition at line 2388 of file AArch64InstrInfo.cpp.

References llvm_unreachable.
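
As a hedged illustration (the helper name makeFlagSetting and the call site are assumptions, not part of LLVM), a caller that already knows a flag-setting twin exists could rewrite the instruction in place:

  // Rewrite MI to its flag-setting twin (e.g. an S-suffixed opcode);
  // convertToFlagSettingOpc reaches llvm_unreachable if no such twin exists.
  static void makeFlagSetting(const AArch64InstrInfo &TII, MachineInstr &MI) {
    bool Is64Bit = false;
    unsigned NewOpc =
        AArch64InstrInfo::convertToFlagSettingOpc(MI.getOpcode(), Is64Bit);
    MI.setDesc(TII.get(NewOpc));
    // Is64Bit now reports whether the W- or X-register form was chosen.
  }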

◆ copyGPRRegTuple()

void AArch64InstrInfo::copyGPRRegTuple ( MachineBasicBlock &  MBB,
MachineBasicBlock::iterator  I,
DebugLoc  DL,
unsigned  DestReg,
unsigned  SrcReg,
bool  KillSrc,
unsigned  Opcode,
unsigned  ZeroReg,
llvm::ArrayRef< unsigned >  Indices 
) const

◆ copyPhysReg()

void AArch64InstrInfo::copyPhysReg ( MachineBasicBlock &  MBB,
MachineBasicBlock::iterator  I,
const DebugLoc &  DL,
MCRegister  DestReg,
MCRegister  SrcReg,
bool  KillSrc 
) const
override

◆ copyPhysRegTuple()

void AArch64InstrInfo::copyPhysRegTuple ( MachineBasicBlock &  MBB,
MachineBasicBlock::iterator  I,
const DebugLoc &  DL,
MCRegister  DestReg,
MCRegister  SrcReg,
bool  KillSrc,
unsigned  Opcode,
llvm::ArrayRef< unsigned >  Indices 
) const

◆ decomposeMachineOperandsTargetFlags()

std::pair< unsigned, unsigned > AArch64InstrInfo::decomposeMachineOperandsTargetFlags ( unsigned  TF) const
override

◆ decomposeStackOffsetForDwarfOffsets()

void AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets ( const StackOffset &  Offset,
int64_t &  ByteSized,
int64_t &  VGSized 
)
static

Definition at line 3951 of file AArch64InstrInfo.cpp.

References assert(), and Offset.

◆ decomposeStackOffsetForFrameOffsets()

void AArch64InstrInfo::decomposeStackOffsetForFrameOffsets ( const StackOffset &  Offset,
int64_t &  NumBytes,
int64_t &  NumPredicateVectors,
int64_t &  NumDataVectors 
)
static

Returns the offset in parts to which this frame offset can be decomposed for the purpose of describing a frame offset.

For non-scalable offsets this is simply its byte size.

Definition at line 3970 of file AArch64InstrInfo.cpp.

References assert(), and Offset.

Referenced by llvm::emitFrameOffset().
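
A brief sketch of the decomposition (the helper name splitOffset and its call site are illustrative):

  // Split a mixed fixed/scalable frame offset into the parts the AArch64
  // frame lowering works with.
  static void splitOffset(const StackOffset &Offset) {
    int64_t NumBytes, NumPredicateVectors, NumDataVectors;
    AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
        Offset, NumBytes, NumPredicateVectors, NumDataVectors);
    // NumBytes holds the fixed byte part; the scalable part is reported as a
    // count of SVE predicate vectors and data vectors.
  }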

◆ describeLoadedValue()

Optional< ParamLoadedValue > AArch64InstrInfo::describeLoadedValue ( const MachineInstr &  MI,
Register  Reg 
) const
override

◆ expandPostRAPseudo()

bool AArch64InstrInfo::expandPostRAPseudo ( MachineInstr &  MI) const
override

Definition at line 1864 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addDef(), llvm::MachineInstrBuilder::addGlobalAddress(), llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addMBB(), llvm::MachineInstrBuilder::addMemOperand(), llvm::MachineInstrBuilder::addReg(), llvm::MachineInstrBuilder::addUse(), llvm::AArch64ISD::ADR, llvm::AArch64ISD::ADRP, assert(), llvm::MachineBasicBlock::begin(), llvm::BuildMI(), llvm::ISD::CATCHRET, llvm::AArch64Subtarget::ClassifyGlobalReference(), llvm::RegState::Dead, llvm::RegState::Define, DL, llvm::AArch64SysReg::SysReg::Encoding, llvm::MachineBasicBlock::erase(), llvm::MachineInstr::FrameDestroy, get, llvm::MachineFunction::getFunction(), llvm::TargetSubtargetInfo::getInstrInfo(), llvm::MachineBasicBlock::getParent(), llvm::GlobalValue::getParent(), llvm::AArch64Subtarget::getRegisterInfo(), llvm::TargetRegisterInfo::getSubReg(), llvm::MachineFunction::getSubtarget(), llvm::MachineFunction::getTarget(), llvm::RegState::Implicit, llvm::AArch64Subtarget::isTargetILP32(), llvm::RegState::Kill, llvm::CodeModel::Large, llvm::AArch64ISD::LOADgot, llvm::AArch64SysReg::lookupSysRegByName(), M, MBB, MBBI, MI, llvm::AArch64II::MO_G0, llvm::AArch64II::MO_G1, llvm::AArch64II::MO_G2, llvm::AArch64II::MO_G3, llvm::AArch64II::MO_GOT, llvm::AArch64II::MO_NC, llvm::AArch64II::MO_PAGE, llvm::AArch64II::MO_PAGEOFF, llvm::AArch64ISD::MRS, Offset, Reg, llvm::RegState::Renamable, llvm::report_fatal_error(), TII, llvm::CodeModel::Tiny, TM, and TRI.

◆ foldMemoryOperandImpl() [1/3]

virtual MachineInstr* llvm::TargetInstrInfo::foldMemoryOperandImpl
inline

Target-dependent implementation for foldMemoryOperand.

Target-independent code in foldMemoryOperand will take care of adding a MachineMemOperand to the newly created instruction. The instruction and any auxiliary instructions necessary will be inserted at InsertPt.

Definition at line 1209 of file TargetInstrInfo.h.

◆ foldMemoryOperandImpl() [2/3]

MachineInstr * AArch64InstrInfo::foldMemoryOperandImpl ( MachineFunction &  MF,
MachineInstr &  MI,
ArrayRef< unsigned >  Ops,
MachineBasicBlock::iterator  InsertPt,
int  FrameIndex,
LiveIntervals *  LIS = nullptr,
VirtRegMap *  VRM = nullptr 
) const
override

◆ foldMemoryOperandImpl() [3/3]

virtual MachineInstr* llvm::TargetInstrInfo::foldMemoryOperandImpl
inline

Target-dependent implementation for foldMemoryOperand.

Target-independent code in foldMemoryOperand will take care of adding a MachineMemOperand to the newly created instruction. The instruction and any auxiliary instructions necessary will be inserted at InsertPt.

Definition at line 1222 of file TargetInstrInfo.h.

◆ genAlternativeCodeSequence()

void AArch64InstrInfo::genAlternativeCodeSequence ( MachineInstr &  Root,
MachineCombinerPattern  Pattern,
SmallVectorImpl< MachineInstr * > &  InsInstrs,
SmallVectorImpl< MachineInstr * > &  DelInstrs,
DenseMap< unsigned, unsigned > &  InstrIdxForVirtReg 
) const
override

When getMachineCombinerPatterns() finds potential patterns, this function generates the instructions that could replace the original code sequence.

Definition at line 5353 of file AArch64InstrInfo.cpp.

References Accumulator, llvm::MachineInstrBuilder::add(), llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addReg(), llvm::BuildMI(), llvm::MachineRegisterInfo::createVirtualRegister(), llvm::FMLAv1i32_indexed_OP1, llvm::FMLAv1i32_indexed_OP2, llvm::FMLAv1i64_indexed_OP1, llvm::FMLAv1i64_indexed_OP2, llvm::FMLAv2f32_OP1, llvm::FMLAv2f32_OP2, llvm::FMLAv2f64_OP1, llvm::FMLAv2f64_OP2, llvm::FMLAv2i32_indexed_OP1, llvm::FMLAv2i32_indexed_OP2, llvm::FMLAv2i64_indexed_OP1, llvm::FMLAv2i64_indexed_OP2, llvm::FMLAv4f16_OP1, llvm::FMLAv4f16_OP2, llvm::FMLAv4f32_OP1, llvm::FMLAv4f32_OP2, llvm::FMLAv4i16_indexed_OP1, llvm::FMLAv4i16_indexed_OP2, llvm::FMLAv4i32_indexed_OP1, llvm::FMLAv4i32_indexed_OP2, llvm::FMLAv8f16_OP1, llvm::FMLAv8f16_OP2, llvm::FMLAv8i16_indexed_OP1, llvm::FMLAv8i16_indexed_OP2, llvm::FMLSv1i32_indexed_OP2, llvm::FMLSv1i64_indexed_OP2, llvm::FMLSv2f32_OP1, llvm::FMLSv2f32_OP2, llvm::FMLSv2f64_OP1, llvm::FMLSv2f64_OP2, llvm::FMLSv2i32_indexed_OP1, llvm::FMLSv2i32_indexed_OP2, llvm::FMLSv2i64_indexed_OP1, llvm::FMLSv2i64_indexed_OP2, llvm::FMLSv4f16_OP1, llvm::FMLSv4f16_OP2, llvm::FMLSv4f32_OP1, llvm::FMLSv4f32_OP2, llvm::FMLSv4i16_indexed_OP1, llvm::FMLSv4i16_indexed_OP2, llvm::FMLSv4i32_indexed_OP1, llvm::FMLSv4i32_indexed_OP2, llvm::FMLSv8f16_OP1, llvm::FMLSv8f16_OP2, llvm::FMLSv8i16_indexed_OP1, llvm::FMLSv8i16_indexed_OP2, llvm::FMULADDD_OP1, llvm::FMULADDD_OP2, llvm::FMULADDH_OP1, llvm::FMULADDH_OP2, llvm::FMULADDS_OP1, llvm::FMULADDS_OP2, llvm::FMULSUBD_OP1, llvm::FMULSUBD_OP2, llvm::FMULSUBH_OP1, llvm::FMULSUBH_OP2, llvm::FMULSUBS_OP1, llvm::FMULSUBS_OP2, llvm::FMULv2i32_indexed_OP1, llvm::FMULv2i32_indexed_OP2, llvm::FMULv2i64_indexed_OP1, llvm::FMULv2i64_indexed_OP2, llvm::FMULv4i16_indexed_OP1, llvm::FMULv4i16_indexed_OP2, llvm::FMULv4i32_indexed_OP1, llvm::FMULv4i32_indexed_OP2, llvm::FMULv8i16_indexed_OP1, llvm::FMULv8i16_indexed_OP2, llvm::FNMULSUBD_OP1, llvm::FNMULSUBH_OP1, llvm::FNMULSUBS_OP1, llvm::TargetInstrInfo::genAlternativeCodeSequence(), genFusedMultiply(), genFusedMultiplyAcc(), genFusedMultiplyAccNeg(), genFusedMultiplyIdx(), genFusedMultiplyIdxNeg(), genIndexedMultiply(), genMaddR(), llvm::MachineInstr::getDebugLoc(), llvm::MachineOperand::getImm(), llvm::TargetSubtargetInfo::getInstrInfo(), llvm::MachineInstr::getOperand(), llvm::MachineBasicBlock::getParent(), llvm::MachineInstr::getParent(), llvm::MachineFunction::getRegInfo(), llvm::MachineFunction::getSubtarget(), Indexed, llvm::DenseMapBase< DerivedT, KeyT, ValueT, KeyInfoT, BucketT >::insert(), llvm::MachineOperand::isImm(), MBB, MRI, llvm::ISD::MUL, llvm::MULADDv16i8_OP1, llvm::MULADDv16i8_OP2, llvm::MULADDv2i32_indexed_OP1, llvm::MULADDv2i32_indexed_OP2, llvm::MULADDv2i32_OP1, llvm::MULADDv2i32_OP2, llvm::MULADDv4i16_indexed_OP1, llvm::MULADDv4i16_indexed_OP2, llvm::MULADDv4i16_OP1, llvm::MULADDv4i16_OP2, llvm::MULADDv4i32_indexed_OP1, llvm::MULADDv4i32_indexed_OP2, llvm::MULADDv4i32_OP1, llvm::MULADDv4i32_OP2, llvm::MULADDv8i16_indexed_OP1, llvm::MULADDv8i16_indexed_OP2, llvm::MULADDv8i16_OP1, llvm::MULADDv8i16_OP2, llvm::MULADDv8i8_OP1, llvm::MULADDv8i8_OP2, llvm::MULADDW_OP1, llvm::MULADDW_OP2, llvm::MULADDWI_OP1, llvm::MULADDX_OP1, llvm::MULADDX_OP2, llvm::MULADDXI_OP1, llvm::MULSUBv16i8_OP1, llvm::MULSUBv16i8_OP2, llvm::MULSUBv2i32_indexed_OP1, llvm::MULSUBv2i32_indexed_OP2, llvm::MULSUBv2i32_OP1, llvm::MULSUBv2i32_OP2, llvm::MULSUBv4i16_indexed_OP1, llvm::MULSUBv4i16_indexed_OP2, llvm::MULSUBv4i16_OP1, llvm::MULSUBv4i16_OP2, llvm::MULSUBv4i32_indexed_OP1, 
llvm::MULSUBv4i32_indexed_OP2, llvm::MULSUBv4i32_OP1, llvm::MULSUBv4i32_OP2, llvm::MULSUBv8i16_indexed_OP1, llvm::MULSUBv8i16_indexed_OP2, llvm::MULSUBv8i16_OP1, llvm::MULSUBv8i16_OP2, llvm::MULSUBv8i8_OP1, llvm::MULSUBv8i8_OP2, llvm::MULSUBW_OP1, llvm::MULSUBW_OP2, llvm::MULSUBWI_OP1, llvm::MULSUBX_OP1, llvm::MULSUBX_OP2, llvm::MULSUBXI_OP1, llvm::AArch64_AM::processLogicalImmediate(), llvm::SignExtend64(), and TII.

◆ getAddrModeFromMemoryOp()

Optional< ExtAddrMode > AArch64InstrInfo::getAddrModeFromMemoryOp ( const MachineInstr &  MemI,
const TargetRegisterInfo *  TRI 
) const
override

◆ getBranchDestBlock()

MachineBasicBlock * AArch64InstrInfo::getBranchDestBlock ( const MachineInstr &  MI) const
override

Definition at line 221 of file AArch64InstrInfo.cpp.

References B, llvm_unreachable, and MI.

Referenced by analyzeBranch().

◆ getElementSizeForOpcode()

uint64_t AArch64InstrInfo::getElementSizeForOpcode ( unsigned  Opc) const

Returns the vector element size (B, H, S or D) of an SVE opcode.

Definition at line 7733 of file AArch64InstrInfo.cpp.

References llvm::AArch64::ElementSizeMask, and get.

◆ getInstSizeInBytes()

unsigned AArch64InstrInfo::getInstSizeInBytes ( const MachineInstr &  MI) const
override

◆ getLoadStoreImmIdx()

unsigned AArch64InstrInfo::getLoadStoreImmIdx ( unsigned  Opc)
static

Returns the index for the immediate for a given instruction.

Definition at line 2240 of file AArch64InstrInfo.cpp.

Referenced by llvm::isAArch64FrameOffsetLegal().

◆ getMachineCombinerPatterns()

bool AArch64InstrInfo::getMachineCombinerPatterns ( MachineInstr &  Root,
SmallVectorImpl< MachineCombinerPattern > &  Patterns,
bool  DoRegPressureReduce 
) const
override

Return true when there is potentially a faster code sequence for an instruction chain ending in Root.

All potential patterns are listed in the Patterns vector. Patterns should be sorted in priority order, since the pattern evaluator stops checking as soon as it finds a faster sequence.

Definition at line 5091 of file AArch64InstrInfo.cpp.

References getFMAPatterns(), getFMULPatterns(), llvm::TargetInstrInfo::getMachineCombinerPatterns(), and getMaddPatterns().
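
A rough sketch of how the generic MachineCombiner drives this hook together with genAlternativeCodeSequence(); the variables TII and Root are assumed, and the real pass also runs a latency/throughput cost model before committing:

  SmallVector<MachineCombinerPattern, 16> Patterns;
  if (TII.getMachineCombinerPatterns(Root, Patterns, /*DoRegPressureReduce=*/false)) {
    for (MachineCombinerPattern P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs, DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      // Build the candidate replacement sequence for this pattern.
      TII.genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                     InstrIdxForVirtReg);
      // ...the cost model then decides whether to insert InsInstrs and
      // erase DelInstrs...
    }
  }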

◆ getMemOpBaseRegImmOfsOffsetOperand()

MachineOperand & AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand ( MachineInstr &  LdSt) const

Return the immediate offset of the base register in a load/store LdSt.

Definition at line 2629 of file AArch64InstrInfo.cpp.

References assert(), llvm::MachineInstr::getNumExplicitOperands(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::isImm(), and llvm::MachineInstr::mayLoadOrStore().

◆ getMemOperandsWithOffsetWidth()

bool AArch64InstrInfo::getMemOperandsWithOffsetWidth ( const MachineInstr &  MI,
SmallVectorImpl< const MachineOperand * > &  BaseOps,
int64_t &  Offset,
bool &  OffsetIsScalable,
unsigned &  Width,
const TargetRegisterInfo *  TRI 
) const
override

◆ getMemOperandWithOffsetWidth()

bool AArch64InstrInfo::getMemOperandWithOffsetWidth ( const MachineInstr &  MI,
const MachineOperand *&  BaseOp,
int64_t &  Offset,
bool &  OffsetIsScalable,
unsigned &  Width,
const TargetRegisterInfo *  TRI 
) const

If OffsetIsScalable is set to 'true', the offset is scaled by vscale.

This is true for some SVE instructions like ldr/str that have a 'reg + imm' addressing mode where the immediate is an index to the scalable vector located at 'reg + imm * vscale x #bytes'.

Definition at line 2580 of file AArch64InstrInfo.cpp.

References assert(), llvm::MachineOperand::getImm(), llvm::TypeSize::getKnownMinSize(), getMemOpInfo(), llvm::MachineInstr::getNumExplicitOperands(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineOperand::isFI(), llvm::MachineOperand::isImm(), llvm::MachineOperand::isReg(), llvm::LinearPolySize< LeafTy >::isScalable(), llvm::MachineInstr::mayLoadOrStore(), and Offset.

Referenced by areMemAccessesTriviallyDisjoint(), and getMemOperandsWithOffsetWidth().
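
A small usage sketch (the helper name inspectMemOp is illustrative) showing how the scalable flag changes the meaning of Offset:

  static void inspectMemOp(const AArch64InstrInfo &TII, const MachineInstr &MI,
                           const TargetRegisterInfo *TRI) {
    const MachineOperand *BaseOp;
    int64_t Offset;
    bool OffsetIsScalable;
    unsigned Width;
    if (!TII.getMemOperandWithOffsetWidth(MI, BaseOp, Offset, OffsetIsScalable,
                                          Width, TRI))
      return; // Not a load/store with a recognised base + offset form.
    if (OffsetIsScalable) {
      // Offset indexes scalable-vector-sized slots, i.e. the effective byte
      // offset is Offset * vscale * (vector size in bytes), as described above.
    }
  }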

◆ getMemOpInfo()

bool AArch64InstrInfo::getMemOpInfo ( unsigned  Opcode,
TypeSize &  Scale,
unsigned &  Width,
int64_t &  MinOffset,
int64_t &  MaxOffset 
)
static

Returns true if opcode Opc is a memory operation.

If it is, set Scale, Width, MinOffset, and MaxOffset accordingly.

For unscaled instructions, Scale is set to 1.

Definition at line 2636 of file AArch64InstrInfo.cpp.

References llvm::TypeSize::Fixed(), llvm::TypeSize::Scalable(), and llvm::AArch64::SVEMaxBitsPerVector.

Referenced by getMemOperandWithOffsetWidth(), and llvm::isAArch64FrameOffsetLegal().
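
A hedged sketch of a range check built on top of getMemOpInfo(); it assumes MinOffset and MaxOffset are expressed in multiples of Scale, and the helper name offsetFits is illustrative:

  static bool offsetFits(unsigned Opcode, int64_t OffsetInBytes) {
    TypeSize Scale = TypeSize::Fixed(0);
    unsigned Width;
    int64_t MinOffset, MaxOffset;
    if (!AArch64InstrInfo::getMemOpInfo(Opcode, Scale, Width, MinOffset, MaxOffset))
      return false; // Not a memory operation.
    int64_t Unit = (int64_t)Scale.getKnownMinSize();
    return OffsetInBytes % Unit == 0 && OffsetInBytes / Unit >= MinOffset &&
           OffsetInBytes / Unit <= MaxOffset;
  }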

◆ getMemScale() [1/2]

static int llvm::AArch64InstrInfo::getMemScale ( const MachineInstr &  MI)
inlinestatic

Definition at line 94 of file AArch64InstrInfo.h.

References getMemScale(), and MI.

◆ getMemScale() [2/2]

int AArch64InstrInfo::getMemScale ( unsigned  Opc)
static

Scaling factor for (scaled or unscaled) load or store.

Definition at line 3016 of file AArch64InstrInfo.cpp.

References llvm_unreachable.

Referenced by getMemScale(), getPrePostIndexedMemOpInfo(), scaleOffset(), and shouldClusterFI().

◆ getNop()

MCInst AArch64InstrInfo::getNop ( ) const
override

Definition at line 4450 of file AArch64InstrInfo.cpp.

References llvm::MCInstBuilder::addImm().

◆ getOutliningCandidateInfo()

outliner::OutlinedFunction AArch64InstrInfo::getOutliningCandidateInfo ( std::vector< outliner::Candidate > &  RepeatedSequenceLocs) const
override

◆ getOutliningType()

outliner::InstrType AArch64InstrInfo::getOutliningType ( MachineBasicBlock::iterator &  MIT,
unsigned  Flags 
) const
override

◆ getRegisterInfo()

const AArch64RegisterInfo& llvm::AArch64InstrInfo::getRegisterInfo ( ) const
inline

getRegisterInfo - TargetInstrInfo is a superset of MRegister info.

As such, whenever a client has an instance of instruction info, it should always be able to get register info as well (through this method).

Definition at line 48 of file AArch64InstrInfo.h.

Referenced by areMemAccessesTriviallyDisjoint(), copyGPRRegTuple(), copyPhysReg(), copyPhysRegTuple(), getOutliningType(), llvm::AArch64Subtarget::getRegisterInfo(), isCandidateToMergeOrPair(), isMBBSafeToOutlineFrom(), loadRegFromStackSlot(), optimizeCondBranch(), and storeRegToStackSlot().

◆ getSerializableBitmaskMachineOperandTargetFlags()

ArrayRef< std::pair< unsigned, const char * > > AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags ( ) const
override

◆ getSerializableDirectMachineOperandTargetFlags()

ArrayRef< std::pair< unsigned, const char * > > AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags ( ) const
override

◆ getSerializableMachineMemOperandTargetFlags()

ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags ( ) const
override

◆ getTailDuplicateSize()

unsigned int AArch64InstrInfo::getTailDuplicateSize ( CodeGenOpt::Level  OptLevel) const
override

Definition at line 7746 of file AArch64InstrInfo.cpp.

References llvm::CodeGenOpt::Aggressive.

◆ getUnscaledLdSt()

Optional< unsigned > AArch64InstrInfo::getUnscaledLdSt ( unsigned  Opc)
static

Returns the unscaled load/store for the scaled load/store opcode, if there is a corresponding unscaled variant available.

Definition at line 2210 of file AArch64InstrInfo.cpp.

Referenced by llvm::isAArch64FrameOffsetLegal().
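
A one-line fallback sketch (the helper name pickUnscaled is illustrative):

  // Prefer the unscaled (LDUR/STUR-style) twin of a scaled load/store
  // opcode when one exists; otherwise keep the original opcode.
  static unsigned pickUnscaled(unsigned Opc) {
    if (Optional<unsigned> UnscaledOpc = AArch64InstrInfo::getUnscaledLdSt(Opc))
      return *UnscaledOpc;
    return Opc;
  }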

◆ hasUnscaledLdStOffset() [1/2]

static bool llvm::AArch64InstrInfo::hasUnscaledLdStOffset ( MachineInstr &  MI)
inlinestatic

Definition at line 84 of file AArch64InstrInfo.h.

References hasUnscaledLdStOffset(), and MI.

◆ hasUnscaledLdStOffset() [2/2]

bool AArch64InstrInfo::hasUnscaledLdStOffset ( unsigned  Opc)
static

Return true if it has an unscaled load/store offset.

Definition at line 2175 of file AArch64InstrInfo.cpp.

Referenced by hasUnscaledLdStOffset(), and shouldClusterMemOps().

◆ insertBranch()

unsigned AArch64InstrInfo::insertBranch ( MachineBasicBlock &  MBB,
MachineBasicBlock *  TBB,
MachineBasicBlock *  FBB,
ArrayRef< MachineOperand >  Cond,
const DebugLoc &  DL,
int *  BytesAdded = nullptr 
) const
override

◆ insertOutlinedCall()

MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall ( Module &  M,
MachineBasicBlock &  MBB,
MachineBasicBlock::iterator &  It,
MachineFunction &  MF,
const outliner::Candidate &  C 
) const
override

◆ insertSelect()

void AArch64InstrInfo::insertSelect ( MachineBasicBlock &  MBB,
MachineBasicBlock::iterator  MI,
const DebugLoc &  DL,
Register  DstReg,
ArrayRef< MachineOperand >  Cond,
Register  TrueReg,
Register  FalseReg 
) const
override

◆ isAddImmediate()

Optional< RegImmPair > AArch64InstrInfo::isAddImmediate ( const MachineInstr &  MI,
Register  Reg 
) const
override

◆ isAsCheapAsAMove()

bool AArch64InstrInfo::isAsCheapAsAMove ( const MachineInstr &  MI) const
override

◆ isAssociativeAndCommutative()

bool AArch64InstrInfo::isAssociativeAndCommutative ( const MachineInstr &  Inst) const
override

◆ isBranchOffsetInRange()

bool AArch64InstrInfo::isBranchOffsetInRange ( unsigned  BranchOpc,
int64_t  BrOffset 
) const
override
Returns
true if a branch instruction with opcode BranchOpc is capable of jumping to a position BrOffset bytes away.

Definition at line 212 of file AArch64InstrInfo.cpp.

References assert(), llvm::tgtok::Bits, getBranchDisplacementBits(), and llvm::isIntN().
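
A sketch of a branch-relaxation style query (the helper name needsRelaxation and the way BrOffsetBytes is computed are assumptions):

  static bool needsRelaxation(const AArch64InstrInfo &TII, const MachineInstr &MI,
                              int64_t BrOffsetBytes) {
    // BrOffsetBytes is the signed distance to the branch target, computed
    // elsewhere from block layout.
    return !TII.isBranchOffsetInRange(MI.getOpcode(), BrOffsetBytes);
  }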

◆ isCandidateToMergeOrPair()

bool AArch64InstrInfo::isCandidateToMergeOrPair ( const MachineInstr &  MI) const

◆ isCoalescableExtInstr()

bool AArch64InstrInfo::isCoalescableExtInstr ( const MachineInstr &  MI,
Register &  SrcReg,
Register &  DstReg,
unsigned &  SubIdx 
) const
override

Definition at line 1033 of file AArch64InstrInfo.cpp.

References MI.

◆ isCopyInstrImpl()

Optional< DestSourcePair > AArch64InstrInfo::isCopyInstrImpl ( const MachineInstr &  MI) const
overrideprotected

If the specific machine instruction is an instruction that moves/copies value from one register to another register return destination and source registers as machine operands.

Definition at line 7592 of file AArch64InstrInfo.cpp.

References MI, and llvm::None.

◆ isExtendLikelyToBeFolded()

bool AArch64InstrInfo::isExtendLikelyToBeFolded ( MachineInstr &  ExtMI,
MachineRegisterInfo &  MRI 
) const
override

◆ isFalkorShiftExtFast()

bool AArch64InstrInfo::isFalkorShiftExtFast ( const MachineInstr &  MI)
static

◆ isFPRCopy()

bool AArch64InstrInfo::isFPRCopy ( const MachineInstr &  MI)
static

Does this instruction rename an FPR without modifying bits?

Definition at line 2088 of file AArch64InstrInfo.cpp.

References assert(), and MI.

◆ isFunctionSafeToOutlineFrom()

bool AArch64InstrInfo::isFunctionSafeToOutlineFrom ( MachineFunction &  MF,
bool  OutlineFromLinkOnceODRs 
) const
override

◆ isGPRCopy()

bool AArch64InstrInfo::isGPRCopy ( const MachineInstr &  MI)
static

Does this instruction rename a GPR without modifying bits?

Definition at line 2058 of file AArch64InstrInfo.cpp.

References assert(), contains(), and MI.

◆ isGPRZero()

bool AArch64InstrInfo::isGPRZero ( const MachineInstr &  MI)
static

Does this instruction set its full destination register to zero?

Definition at line 2034 of file AArch64InstrInfo.cpp.

References assert(), and MI.

◆ isLdStPairSuppressed()

bool AArch64InstrInfo::isLdStPairSuppressed ( const MachineInstr &  MI)
static

Return true if pairing the given load or store is hinted to be unprofitable.

Check all MachineMemOperands for a hint to suppress pairing.

Definition at line 2155 of file AArch64InstrInfo.cpp.

References llvm::any_of(), and MI.

Referenced by isCandidateToMergeOrPair().

◆ isLoadFromStackSlot()

unsigned AArch64InstrInfo::isLoadFromStackSlot ( const MachineInstr &  MI,
int &  FrameIndex 
) const
override

Definition at line 2107 of file AArch64InstrInfo.cpp.

References llvm::ISD::FrameIndex, and MI.

◆ isMBBSafeToOutlineFrom()

bool AArch64InstrInfo::isMBBSafeToOutlineFrom ( MachineBasicBlock &  MBB,
unsigned &  Flags 
) const
override

◆ isPairableLdStInst()

bool AArch64InstrInfo::isPairableLdStInst ( const MachineInstr &  MI)
static

Return true if the given load or store may be paired with another.

Definition at line 2346 of file AArch64InstrInfo.cpp.

References MI.

Referenced by shouldClusterMemOps().

◆ isPreLd()

bool AArch64InstrInfo::isPreLd ( const MachineInstr &  MI)
static

Returns whether the instruction is a pre-indexed load.

Definition at line 3088 of file AArch64InstrInfo.cpp.

References MI.

Referenced by isPreLdSt().

◆ isPreLdSt()

bool AArch64InstrInfo::isPreLdSt ( const MachineInstr &  MI)
static

Returns whether the instruction is a pre-indexed load/store.

Definition at line 3114 of file AArch64InstrInfo.cpp.

References isPreLd(), isPreSt(), and MI.

Referenced by areCandidatesToMergeOrPair(), getLdStBaseOp(), getLdStOffsetOp(), getLdStRegOp(), and isCandidateToMergeOrPair().

◆ isPreSt()

bool AArch64InstrInfo::isPreSt ( const MachineInstr &  MI)
static

Returns whether the instruction is a pre-indexed store.

Definition at line 3101 of file AArch64InstrInfo.cpp.

References MI.

Referenced by isPreLdSt().

◆ isPTestLikeOpcode()

bool AArch64InstrInfo::isPTestLikeOpcode ( unsigned  Opc) const

Returns true if the opcode is for an SVE instruction that sets the condition codes as if its results had been fed to a PTEST instruction along with the same general predicate.

Definition at line 7737 of file AArch64InstrInfo.cpp.

References get, and llvm::AArch64::InstrFlagIsPTestLike.

◆ isSchedulingBoundary()

bool AArch64InstrInfo::isSchedulingBoundary ( const MachineInstr &  MI,
const MachineBasicBlock *  MBB,
const MachineFunction &  MF 
) const
override

◆ isSEHInstruction()

bool AArch64InstrInfo::isSEHInstruction ( const MachineInstr &  MI)
static

Return true if the instruction is an SEH instruction used for unwinding on Windows.

Definition at line 1007 of file AArch64InstrInfo.cpp.

References MI.

Referenced by convertCalleeSaveRestoreToSPPrePostIncDec(), llvm::AArch64FrameLowering::emitEpilogue(), fixupCalleeSaveRestoreStackOffset(), and isSchedulingBoundary().

◆ isStoreToStackSlot()

unsigned AArch64InstrInfo::isStoreToStackSlot ( const MachineInstr &  MI,
int &  FrameIndex 
) const
override

Definition at line 2130 of file AArch64InstrInfo.cpp.

References llvm::ISD::FrameIndex, and MI.

◆ isStridedAccess()

bool AArch64InstrInfo::isStridedAccess ( const MachineInstr &  MI)
static

Return true if the given load or store is a strided memory access.

Check all MachineMemOperands for a hint that the load/store is strided.

Definition at line 2169 of file AArch64InstrInfo.cpp.

References llvm::any_of(), and MI.

◆ isSubregFoldable()

bool llvm::AArch64InstrInfo::isSubregFoldable ( ) const
inlineoverride

Definition at line 181 of file AArch64InstrInfo.h.

◆ isThroughputPattern()

bool AArch64InstrInfo::isThroughputPattern ( MachineCombinerPattern  Pattern) const
override

Return true when a code sequence can improve throughput.

It should be called only for instructions in loops.

Parameters
Pattern - combiner pattern

Definition at line 4972 of file AArch64InstrInfo.cpp.

References llvm::FMLAv1i32_indexed_OP1, llvm::FMLAv1i32_indexed_OP2, llvm::FMLAv1i64_indexed_OP1, llvm::FMLAv1i64_indexed_OP2, llvm::FMLAv2f32_OP1, llvm::FMLAv2f32_OP2, llvm::FMLAv2f64_OP1, llvm::FMLAv2f64_OP2, llvm::FMLAv2i32_indexed_OP1, llvm::FMLAv2i32_indexed_OP2, llvm::FMLAv2i64_indexed_OP1, llvm::FMLAv2i64_indexed_OP2, llvm::FMLAv4f16_OP1, llvm::FMLAv4f16_OP2, llvm::FMLAv4f32_OP1, llvm::FMLAv4f32_OP2, llvm::FMLAv4i16_indexed_OP1, llvm::FMLAv4i16_indexed_OP2, llvm::FMLAv4i32_indexed_OP1, llvm::FMLAv4i32_indexed_OP2, llvm::FMLAv8f16_OP1, llvm::FMLAv8f16_OP2, llvm::FMLAv8i16_indexed_OP1, llvm::FMLAv8i16_indexed_OP2, llvm::FMLSv1i32_indexed_OP2, llvm::FMLSv1i64_indexed_OP2, llvm::FMLSv2f32_OP2, llvm::FMLSv2f64_OP2, llvm::FMLSv2i32_indexed_OP2, llvm::FMLSv2i64_indexed_OP2, llvm::FMLSv4f16_OP1, llvm::FMLSv4f16_OP2, llvm::FMLSv4f32_OP2, llvm::FMLSv4i16_indexed_OP1, llvm::FMLSv4i16_indexed_OP2, llvm::FMLSv4i32_indexed_OP2, llvm::FMLSv8f16_OP1, llvm::FMLSv8f16_OP2, llvm::FMLSv8i16_indexed_OP1, llvm::FMLSv8i16_indexed_OP2, llvm::FMULADDD_OP1, llvm::FMULADDD_OP2, llvm::FMULADDH_OP1, llvm::FMULADDH_OP2, llvm::FMULADDS_OP1, llvm::FMULADDS_OP2, llvm::FMULSUBD_OP1, llvm::FMULSUBD_OP2, llvm::FMULSUBH_OP1, llvm::FMULSUBH_OP2, llvm::FMULSUBS_OP1, llvm::FMULSUBS_OP2, llvm::FMULv2i32_indexed_OP1, llvm::FMULv2i32_indexed_OP2, llvm::FMULv2i64_indexed_OP1, llvm::FMULv2i64_indexed_OP2, llvm::FMULv4i16_indexed_OP1, llvm::FMULv4i16_indexed_OP2, llvm::FMULv4i32_indexed_OP1, llvm::FMULv4i32_indexed_OP2, llvm::FMULv8i16_indexed_OP1, llvm::FMULv8i16_indexed_OP2, llvm::FNMULSUBD_OP1, llvm::FNMULSUBH_OP1, llvm::FNMULSUBS_OP1, llvm::MULADDv16i8_OP1, llvm::MULADDv16i8_OP2, llvm::MULADDv2i32_indexed_OP1, llvm::MULADDv2i32_indexed_OP2, llvm::MULADDv2i32_OP1, llvm::MULADDv2i32_OP2, llvm::MULADDv4i16_indexed_OP1, llvm::MULADDv4i16_indexed_OP2, llvm::MULADDv4i16_OP1, llvm::MULADDv4i16_OP2, llvm::MULADDv4i32_indexed_OP1, llvm::MULADDv4i32_indexed_OP2, llvm::MULADDv4i32_OP1, llvm::MULADDv4i32_OP2, llvm::MULADDv8i16_indexed_OP1, llvm::MULADDv8i16_indexed_OP2, llvm::MULADDv8i16_OP1, llvm::MULADDv8i16_OP2, llvm::MULADDv8i8_OP1, llvm::MULADDv8i8_OP2, llvm::MULSUBv16i8_OP1, llvm::MULSUBv16i8_OP2, llvm::MULSUBv2i32_indexed_OP1, llvm::MULSUBv2i32_indexed_OP2, llvm::MULSUBv2i32_OP1, llvm::MULSUBv2i32_OP2, llvm::MULSUBv4i16_indexed_OP1, llvm::MULSUBv4i16_indexed_OP2, llvm::MULSUBv4i16_OP1, llvm::MULSUBv4i16_OP2, llvm::MULSUBv4i32_indexed_OP1, llvm::MULSUBv4i32_indexed_OP2, llvm::MULSUBv4i32_OP1, llvm::MULSUBv4i32_OP2, llvm::MULSUBv8i16_indexed_OP1, llvm::MULSUBv8i16_indexed_OP2, llvm::MULSUBv8i16_OP1, llvm::MULSUBv8i16_OP2, llvm::MULSUBv8i8_OP1, and llvm::MULSUBv8i8_OP2.

◆ isWhileOpcode()

bool AArch64InstrInfo::isWhileOpcode ( unsigned  Opc) const

Returns true if the opcode is for an SVE WHILE## instruction.

Definition at line 7741 of file AArch64InstrInfo.cpp.

References get, and llvm::AArch64::InstrFlagIsWhile.

◆ loadRegFromStackSlot()

void AArch64InstrInfo::loadRegFromStackSlot ( MachineBasicBlock &  MBB,
MachineBasicBlock::iterator  MBBI,
Register  DestReg,
int  FrameIndex,
const TargetRegisterClass *  RC,
const TargetRegisterInfo *  TRI 
) const
override

◆ optimizeCompareInstr()

bool AArch64InstrInfo::optimizeCompareInstr ( MachineInstr &  CmpInstr,
Register  SrcReg,
Register  SrcReg2,
int64_t  CmpMask,
int64_t  CmpValue,
const MachineRegisterInfo *  MRI 
) const
override

optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register.

Try to optimize a compare instruction.

A compare instruction is an instruction which produces AArch64::NZCV. It is a true compare instruction when there are no uses of its destination register.

The following steps are tried in order:

  1. Convert CmpInstr into an unconditional version.
  2. Remove CmpInstr if, earlier in the block, there is an instruction producing a needed condition code or an instruction which can be converted into such an instruction. Only comparison with zero is supported.

Definition at line 1429 of file AArch64InstrInfo.cpp.

References assert(), convertToNonFlagSettingOpc(), llvm::MachineInstr::definesRegister(), llvm::MachineInstr::eraseFromParent(), llvm::MachineInstr::findRegisterDefOperandIdx(), get, llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineInstr::getParent(), llvm::MachineOperand::getReg(), MRI, llvm::MachineInstr::RemoveOperand(), llvm::MachineInstr::setDesc(), UpdateOperandRegClass(), and llvm::MachineRegisterInfo::use_nodbg_empty().

◆ optimizeCondBranch()

bool AArch64InstrInfo::optimizeCondBranch ( MachineInstr &  MI) const
override

Replace a csinc-branch sequence by a simple conditional branch.

Examples:

  1. csinc w9, wzr, wzr, <condition code>
    tbnz w9, #0, 0x44
    to
    b.<inverted condition code>
  2. csinc w9, wzr, wzr, <condition code>
    tbz w9, #0, 0x44
    to
    b.<condition code>

Replace compare and branch sequence by TBZ/TBNZ instruction when the compare's constant operand is power of 2.

Examples:

and w8, w8, #0x400
cbnz w8, L1

to

tbnz w8, #10, L1
Parameters
MI - Conditional branch
Returns
True when the simple conditional branch is generated

Definition at line 6266 of file AArch64InstrInfo.cpp.

References llvm::MachineInstrBuilder::addImm(), llvm::MachineInstrBuilder::addMBB(), llvm::MachineInstrBuilder::addReg(), AK_Write, areCFlagsAccessedBetweenInstrs(), assert(), llvm::BuildMI(), llvm::AArch64_AM::decodeLogicalImmediate(), llvm::MachineRegisterInfo::def_empty(), DefMI, DL, llvm::MachineInstr::findRegisterDefOperandIdx(), get, llvm::MachineOperand::getImm(), llvm::AArch64CC::getInvertedCondCode(), llvm::MachineInstr::getOpcode(), llvm::MachineInstr::getOperand(), llvm::MachineBasicBlock::getParent(), llvm::MachineInstr::getParent(), llvm::MachineOperand::getReg(), llvm::MachineFunction::getRegInfo(), getRegisterInfo(), llvm::MachineRegisterInfo::getVRegDef(), llvm::MachineRegisterInfo::hasOneDef(), llvm::MachineRegisterInfo::hasOneNonDBGUse(), llvm::MachineInstr::isCopy(), llvm::isPowerOf2_64(), llvm::Register::isVirtualRegister(), llvm_unreachable, llvm::Log2_64(), llvm::BitmaskEnumDetail::Mask(), MBB, MI, MRI, llvm::MachineOperand::setIsKill(), and llvm::MachineOperand::setSubReg().

◆ removeBranch()

unsigned AArch64InstrInfo::removeBranch ( MachineBasicBlock &  MBB,
int *  BytesRemoved = nullptr 
) const
override

◆ reverseBranchCondition()

bool AArch64InstrInfo::reverseBranchCondition ( SmallVectorImpl< MachineOperand > &  Cond) const
override

◆ shouldClusterMemOps()

bool AArch64InstrInfo::shouldClusterMemOps ( ArrayRef< const MachineOperand * >  BaseOps1,
ArrayRef< const MachineOperand * >  BaseOps2,
unsigned  NumLoads,
unsigned  NumBytes 
) const
override

◆ shouldOutlineFromFunctionByDefault()

bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault ( MachineFunction &  MF) const
override

◆ storeRegToStackSlot()

void AArch64InstrInfo::storeRegToStackSlot ( MachineBasicBlock &  MBB,
MachineBasicBlock::iterator  MBBI,
Register  SrcReg,
bool  isKill,
int  FrameIndex,
const TargetRegisterClass *  RC,
const TargetRegisterInfo *  TRI 
) const
override

◆ suppressLdStPair()

void AArch64InstrInfo::suppressLdStPair ( MachineInstr &  MI)
static

Hint that pairing the given load or store is unprofitable.

Set a flag on the first MachineMemOperand to suppress pairing.

Definition at line 2162 of file AArch64InstrInfo.cpp.

References MI, and llvm::MOSuppressPair.
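
A minimal sketch of the hint round trip (MI is an assumed load/store MachineInstr reference):

  // Mark MI as a poor pairing candidate, then observe the hint the way the
  // load/store optimizer would.
  AArch64InstrInfo::suppressLdStPair(MI);
  assert(AArch64InstrInfo::isLdStPairSuppressed(MI) &&
         "the MOSuppressPair hint should be visible");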

◆ useMachineCombiner()

bool AArch64InstrInfo::useMachineCombiner ( ) const
override

AArch64 supports MachineCombiner.

Definition at line 4455 of file AArch64InstrInfo.cpp.


The documentation for this class was generated from the following files:
Target/AArch64/AArch64InstrInfo.h
Target/AArch64/AArch64InstrInfo.cpp