#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

class AArch64Subtarget;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
class AArch64InstrInfo final : public AArch64GenInstrInfo {
public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
  const AArch64RegisterInfo &getRegisterInfo() const;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  Register isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  Register isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
      const TargetRegisterInfo *TRI) const override;

  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           int64_t Offset1, bool OffsetIsScalable1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           int64_t Offset2, bool OffsetIsScalable2,
                           unsigned ClusterSize,
                           unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       const DebugLoc &DL, MCRegister DestReg,
                       MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                       unsigned ZeroReg, llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, Register DestReg, Register SrcReg,
                   bool KillSrc, bool RenamableDest = false,
                   bool RenamableSrc = false) const override;

  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;

  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;

  bool canInsertSelect(const MachineBasicBlock &,
                       ArrayRef<MachineOperand> Cond, Register, Register,
                       Register, int &, int &, int &) const override;

  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask,
                            int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  bool getMachineCombinerPatterns(MachineInstr &Root,
                                  SmallVectorImpl<unsigned> &Patterns,
                                  bool DoRegPressureReduce) const override;

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  bool isAccumulationOpcode(unsigned Opcode) const override;
  unsigned getAccumulationStartOpcode(unsigned Opcode) const override;
  unsigned
  getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override;

  void genAlternativeCodeSequence(
      MachineInstr &Root, unsigned Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<Register, unsigned> &InstrIdxForVirtReg) const override;

  bool useMachineCombiner() const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  std::optional<std::unique_ptr<outliner::OutlinedFunction>>
  getOutliningCandidateInfo(
      const MachineModuleInfo &MMI,
      std::vector<outliner::Candidate> &RepeatedSequenceLocs,
      unsigned MinRepeats) const override;
  void mergeOutliningCandidateAttributes(
      Function &F, std::vector<outliner::Candidate> &Candidates) const override;
  outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                           MachineBasicBlock::iterator &MBBI,
                                           unsigned Flags) const override;
  SmallVector<
      std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
  getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

  void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator Iter, DebugLoc &DL,
                          bool AllowSideEffects = true) const override;

  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  bool isPTestLikeOpcode(unsigned Opc) const;
  bool isWhileOpcode(unsigned Opc) const;

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);

  bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
                             unsigned Scale) const;

  MachineBasicBlock::iterator
  probedStackAlloc(MachineBasicBlock::iterator MBBI, Register TargetReg,
                   bool FrameSetup) const;
#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"
protected:
  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;
  std::optional<DestSourcePair>
  isCopyLikeInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;

  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue,
                            const MachineRegisterInfo &MRI) const;

  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
  std::optional<unsigned>
  canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
                      MachineInstr *Pred,
                      const MachineRegisterInfo *MRI) const;

  bool verifyInstruction(const MachineInstr &MI,
                         StringRef &ErrInfo) const override;
};
struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};
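A minimal sketch of how these accumulators are used: each instruction that reads the flags contributes the condition bits it consumes, and the union tells the optimizer which of N, Z, C and V must be preserved. The local variables below are hypothetical.

  UsedNZCV Used;
  UsedNZCV FromBeq;   // a b.eq user only reads Z
  FromBeq.Z = true;
  UsedNZCV FromBhs;   // a b.hs user only reads C
  FromBhs.C = true;
  Used |= FromBeq;
  Used |= FromBhs;    // Used.Z and Used.C are now set; N and V stay false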
std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);

bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI,
                              unsigned FrameReg, unsigned Reg,
                              const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
MCCFIInstruction
createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                const StackOffset &OffsetFromDefCFA,
                std::optional<int64_t> IncomingVGOffsetFromDefCFA);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
                     StackOffset InitialOffset = {},
                     unsigned FrameReg = AArch64::SP);

/// Check if the Offset is a valid frame offset for MI.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
// Opcode case-label fragments from the static inline helper predicates
// defined in this header. The compare-and-branch forms below appear to be
// recognized by isCondBranchOpcode():
  case AArch64::CBWPri:
  case AArch64::CBXPri:
  case AArch64::CBWPrr:
  case AArch64::CBXPrr:
// ...the zero-discriminator authenticated branch-and-link forms by
// isIndirectCallOpcode():
  case AArch64::BLRAAZ:
  case AArch64::BLRABZ:
// ...and the SVE predicate-initialization forms by isPTrueOpcode():
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
/// Return XPAC opcode to be used for a ptrauth strip using the given key.
static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA:
  case IB:
    return AArch64::XPACI;
  case DA:
  case DB:
    return AArch64::XPACD;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return AUT opcode to be used for a ptrauth auth using the given key,
/// or its AUT*Z variant that doesn't take a discriminator operand.
static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA:
    return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
  case IB:
    return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
  case DA:
    return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
  case DB:
    return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return PAC opcode to be used for a ptrauth sign using the given key,
/// or its PAC*Z variant that doesn't take a discriminator operand.
static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA:
    return Zero ? AArch64::PACIZA : AArch64::PACIA;
  case IB:
    return Zero ? AArch64::PACIZB : AArch64::PACIB;
  case DA:
    return Zero ? AArch64::PACDZA : AArch64::PACDA;
  case DB:
    return Zero ? AArch64::PACDZB : AArch64::PACDB;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}
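A short usage sketch relying only on the three helpers above: picking the sign, authenticate and strip opcodes for the IA key.

  unsigned SignOp  = getPACOpcodeForKey(AArch64PACKey::IA, /*Zero=*/false); // PACIA
  unsigned AuthOp  = getAUTOpcodeForKey(AArch64PACKey::IA, /*Zero=*/true);  // AUTIZA
  unsigned StripOp = getXPACOpcodeForKey(AArch64PACKey::IA);                // XPACI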
// TSFlags field layout: each macro places a target-specific field at a
// fixed bit offset within an instruction's TSFlags.
#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X)
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3)
#define TSFLAG_FALSE_LANE_TYPE(X) ((X) << 7)
#define TSFLAG_INSTR_FLAGS(X) ((X) << 9)
#define TSFLAG_SME_MATRIX_TYPE(X) ((X) << 11)
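The enum definitions that consume these macros sit between the #define and #undef blocks and are not captured in this extraction. As an illustration of the layout only (the real code masks with named enum values defined via these macros, not a bare constant, and elementSizeField is a hypothetical name), the first field can be read back like this:

  static uint64_t elementSizeField(uint64_t TSFlags) {
    // TSFLAG_ELEMENT_SIZE_TYPE occupies bits [2:0]; the destructive-inst
    // field starts at bit 3, so mask with 0x7.
    return TSFlags & 0x7;
  }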
// The helper macros are scoped to those enum definitions and undefined
// once they are no longer needed.
#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS
#undef TSFLAG_SME_MATRIX_TYPE

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
static bool isHForm(const MachineInstr &MI)
Returns whether the instruction is in H form (16 bit operands)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
static bool hasBTISemantics(const MachineInstr &MI)
Returns whether the instruction can be compatible with non-zero BTYPE.
static bool isQForm(const MachineInstr &MI)
Returns whether the instruction is in Q form (128 bit operands)
static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width, int64_t &MinOffset, int64_t &MaxOffset)
Returns true if opcode Opc is a memory operation; if it is, sets Scale, Width, MinOffset and MaxOffset for that operation.
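A hedged usage sketch of getMemOpInfo; the opcode and the expected values in the comment are illustrative.

  TypeSize Scale = TypeSize::getFixed(0), Width = TypeSize::getFixed(0);
  int64_t MinOffset, MaxOffset;
  if (AArch64InstrInfo::getMemOpInfo(AArch64::STRXui, Scale, Width,
                                     MinOffset, MaxOffset)) {
    // For a scaled 8-byte store one would expect Scale == Width == 8 and
    // an unsigned, scaled immediate range of [0, 4095].
  }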
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isFPRCopy(const MachineInstr &MI)
Does this instruction rename an FPR without modifying bits?
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to another, return the destination and source registers as machine operands.
static int getMemScale(const MachineInstr &MI)
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
bool isSubregFoldable() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
static bool isGPRCopy(const MachineInstr &MI)
Does this instruction rename a GPR without modifying bits?
static unsigned convertToFlagSettingOpc(unsigned Opc)
Return the opcode that set flags when possible.
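For instance (an illustrative opcode pair), the flag-setting form of a plain add is its S-suffixed variant:

  unsigned FlagSetting = AArch64InstrInfo::convertToFlagSettingOpc(AArch64::ADDWri);
  // Expected: AArch64::ADDSWri, which also writes NZCV.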
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
static const MachineOperand & getLdStOffsetOp(const MachineInstr &MI)
Returns the immediate offset operand of a load/store.
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
static std::optional< unsigned > getUnscaledLdSt(unsigned Opc)
Returns the unscaled load/store for the scaled load/store opcode, if there is a corresponding unscaled variant available.
static bool hasUnscaledLdStOffset(unsigned Opc)
Return true if it has an unscaled load/store offset.
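A sketch tying the two queries together (opcodes illustrative): a scaled STR has an unscaled STUR twin that takes a signed 9-bit byte offset.

  if (std::optional<unsigned> Unscaled =
          AArch64InstrInfo::getUnscaledLdSt(AArch64::STRXui)) {
    // Expected: AArch64::STURXi; hasUnscaledLdStOffset(*Unscaled) is true.
  }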
static const MachineOperand & getLdStAmountOp(const MachineInstr &MI)
Returns the shift amount operand of a load/store.
static bool hasUnscaledLdStOffset(MachineInstr &MI)
static bool isPreLdSt(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load/store.
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
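A usage sketch for the offset query; MI, TII and TRI are assumed to be in scope.

  SmallVector<const MachineOperand *, 2> BaseOps;
  int64_t Offset;
  bool OffsetIsScalable;
  LocationSize Width = LocationSize::precise(0);
  if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                         OffsetIsScalable, Width, TRI)) {
    // BaseOps holds the base register operand(s). Offset is in bytes; when
    // OffsetIsScalable is true it must be multiplied by vscale at runtime.
  }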
bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isPairableLdStInst(const MachineInstr &MI)
Return true if the given load or store may be paired with another.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
const AArch64RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
static bool isPreSt(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed store.
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
AArch64InstrInfo(const AArch64Subtarget &STI)
static bool isPairedLdSt(const MachineInstr &MI)
Returns whether the instruction is a paired load/store.
bool getMemOperandWithOffsetWidth(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, TypeSize &Width, const TargetRegisterInfo *TRI) const
If OffsetIsScalable is set to 'true', the offset is scaled by vscale.
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
static bool isStridedAccess(const MachineInstr &MI)
Return true if the given load or store is a strided memory access.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Detect opportunities for ldp/stp formation.
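For example, two adjacent loads such as ldr x8, [x0] and ldr x9, [x0, #8] report the same base operand with offsets 0 and 8, and clustering them lets a later pass rewrite the pair as a single ldp x8, x9, [x0].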
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
bool isThroughputPattern(unsigned Pattern) const override
Return true when a code sequence can improve throughput.
MachineOperand & getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const
Return the immediate offset of the base register in a load/store LdSt.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
static bool isLdStPairSuppressed(const MachineInstr &MI)
Return true if pairing the given load or store is hinted to be unprofitable.
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineBasicBlock::iterator probedStackAlloc(MachineBasicBlock::iterator MBBI, Register TargetReg, bool FrameSetup) const
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register.
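For example, when a sub w8, w0, #1 already feeds a cmp w0, #1, the SUB can be rewritten as the flag-setting subs w8, w0, #1 and the separate compare deleted, since both set NZCV from the same subtraction.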
static unsigned getLoadStoreImmIdx(unsigned Opc)
Returns the index for the immediate for a given instruction.
static bool isGPRZero(const MachineInstr &MI)
Does this instruction set its full destination register to zero?
void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, unsigned ZeroReg, llvm::ArrayRef< unsigned > Indices) const
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2, and the value it compares against in CmpValue.
CombinerObjective getCombinerObjective(unsigned Pattern) const override
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
bool isAsCheapAsAMove(const MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyLikeInstrImpl(const MachineInstr &MI) const override
static void suppressLdStPair(MachineInstr &MI)
Hint that pairing the given load or store is unprofitable.
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isPreLd(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load.
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, llvm::ArrayRef< unsigned > Indices) const
bool optimizeCondBranch(MachineInstr &MI) const override
Replace csincr-branch sequence by simple conditional branch.
static int getMemScale(unsigned Opc)
Scaling factor for (scaled or unscaled) load or store.
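For example (illustrative opcode): an 8-byte scaled load uses a scale of 8.

  int Scale = AArch64InstrInfo::getMemScale(AArch64::LDRXui); // expected: 8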
bool isCandidateToMergeOrPair(const MachineInstr &MI) const
Return true if this is a load/store that can be potentially paired/merged.
MCInst getNop() const override
static const MachineOperand & getLdStBaseOp(const MachineInstr &MI)
Returns the base register operand of a load/store.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Instances of this class represent a single low-level machine instruction.
Wrapper class representing physical registers. Should be passed by value.
MachineInstrBundleIterator< MachineInstr > iterator
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Wrapper class representing virtual and physical registers.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
int getSVERevInstr(uint16_t Opcode)
int getSMEPseudoMap(uint16_t Opcode)
static const uint64_t InstrFlagIsWhile
static const uint64_t InstrFlagIsPTestLike
@ Destructive2xRegImmUnpred
@ DestructiveBinaryShImmUnpred
@ DestructiveInstTypeMask
@ DestructiveUnaryPassthru
@ DestructiveTernaryCommWithRev
@ DestructiveBinaryCommWithRev
int getSVEPseudoMap(uint16_t Opcode)
int getSVENonRevInstr(uint16_t Opcode)
InstrType
Represents how an instruction should be mapped by the outliner.
static bool isCondBranchOpcode(int Opc)
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
static bool isPTrueOpcode(unsigned Opc)
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
static bool isIndirectBranchOpcode(int Opc)
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
AArch64FrameOffsetStatus
Use to report the frame offset status in isAArch64FrameOffsetLegal.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
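Putting the query and its status values together, a hedged sketch (MI and Offset, a StackOffset, are assumed to be in scope):

  int64_t Emittable = 0;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, /*OutUseUnscaledOp=*/nullptr,
                                         /*OutUnscaledOp=*/nullptr, &Emittable);
  if (Status & AArch64FrameOffsetIsLegal) {
    // The whole offset fits the instruction's immediate field.
  } else if (Status & AArch64FrameOffsetCanUpdate) {
    // Only Emittable can be folded in; the remainder stays in Offset.
  }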
static bool isSEHInstruction(const MachineInstr &MI)
static bool isIndirectCallOpcode(unsigned Opc)
AArch64MachineCombinerPattern
@ MULSUBv2i32_indexed_OP1
@ MULADDv4i16_indexed_OP2
@ MULSUBv8i16_indexed_OP2
@ MULSUBv4i16_indexed_OP2
@ MULSUBv4i32_indexed_OP2
@ MULADDv2i32_indexed_OP1
@ MULADDv4i32_indexed_OP1
@ MULADDv2i32_indexed_OP2
@ MULSUBv4i16_indexed_OP1
@ MULADDv4i32_indexed_OP2
@ MULSUBv8i16_indexed_OP1
@ MULSUBv2i32_indexed_OP2
@ MULADDv8i16_indexed_OP1
@ MULSUBv4i32_indexed_OP1
@ MULADDv8i16_indexed_OP2
@ MULADDv4i16_indexed_OP1
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
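A minimal sketch of a frame-setup use (MBB, MBBI, DL and TII assumed in scope): allocate 48 bytes of stack by lowering SP.

  emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(-48), TII, MachineInstr::FrameSetup);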
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
std::optional< UsedNZCV > examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr, const TargetRegisterInfo &TRI, SmallVectorImpl< MachineInstr * > *CCUseInstrs=nullptr)
CodeGenOptLevel
Code generation optimization level.
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA, std::optional< int64_t > IncomingVGOffsetFromDefCFA)
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn't take a discriminator operand, using zero instead.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static const MachineMemOperand::Flags MOSuppressPair
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI, const MachineInstr &UseMI, const TargetRegisterInfo *TRI)
Return true if there is an instruction /after/ DefMI and before UseMI which either reads or clobbers NZCV.
static const MachineMemOperand::Flags MOStridedAccess
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn't take a discriminator operand, using zero instead.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
UsedNZCV & operator|=(const UsedNZCV &UsedFlags)
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.