LLVM 20.0.0git
AArch64InstrInfo.h
1//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
14#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
15
16#include "AArch64.h"
17#include "AArch64RegisterInfo.h"
20#include <optional>
21
22#define GET_INSTRINFO_HEADER
23#include "AArch64GenInstrInfo.inc"
24
25namespace llvm {
26
27class AArch64Subtarget;
28
28
29static const MachineMemOperand::Flags MOSuppressPair =
30 MachineMemOperand::MOTargetFlag1;
31static const MachineMemOperand::Flags MOStridedAccess =
32 MachineMemOperand::MOTargetFlag2;
33
34#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
35
36// AArch64 MachineCombiner patterns
37enum AArch64MachineCombinerPattern : unsigned {
38 // These are patterns used to reduce the length of dependence chain.
41
42 // These are multiply-add patterns matched by the AArch64 machine combiner.
55 // NEON integer vectors
68
81
90
99
100 // Floating Point
162
173
175};
176class AArch64InstrInfo final : public AArch64GenInstrInfo {
177 const AArch64RegisterInfo RI;
178 const AArch64Subtarget &Subtarget;
179
180public:
181 explicit AArch64InstrInfo(const AArch64Subtarget &STI);
182
183 /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
184 /// such, whenever a client has an instance of instruction info, it should
185 /// always be able to get register info as well (through this method).
186 const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
187
188 unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
189
190 bool isAsCheapAsAMove(const MachineInstr &MI) const override;
191
192 bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
193 Register &DstReg, unsigned &SubIdx) const override;
194
195 bool
196 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
197 const MachineInstr &MIb) const override;
198
199 Register isLoadFromStackSlot(const MachineInstr &MI,
200 int &FrameIndex) const override;
201 Register isStoreToStackSlot(const MachineInstr &MI,
202 int &FrameIndex) const override;
203
204 /// Does this instruction set its full destination register to zero?
205 static bool isGPRZero(const MachineInstr &MI);
206
207 /// Does this instruction rename a GPR without modifying bits?
208 static bool isGPRCopy(const MachineInstr &MI);
209
210 /// Does this instruction rename an FPR without modifying bits?
211 static bool isFPRCopy(const MachineInstr &MI);
212
213 /// Return true if pairing the given load or store is hinted to be
214 /// unprofitable.
215 static bool isLdStPairSuppressed(const MachineInstr &MI);
216
217 /// Return true if the given load or store is a strided memory access.
218 static bool isStridedAccess(const MachineInstr &MI);
219
220 /// Return true if the given opcode has an unscaled load/store offset.
221 static bool hasUnscaledLdStOffset(unsigned Opc);
222 static bool hasUnscaledLdStOffset(MachineInstr &MI) {
223 return hasUnscaledLdStOffset(MI.getOpcode());
224 }
225
226 /// Returns the unscaled load/store for the scaled load/store opcode,
227 /// if there is a corresponding unscaled variant available.
228 static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
229
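// Illustrative sketch (not part of the original header): mapping a scaled store
// opcode to its unscaled twin before retrying an out-of-range offset.
// AArch64::STRXui / AArch64::STURXi are assumed here purely as example opcodes.
//
//   unsigned Opc = AArch64::STRXui;
//   if (std::optional<unsigned> Unscaled =
//           AArch64InstrInfo::getUnscaledLdSt(Opc)) {
//     assert(AArch64InstrInfo::hasUnscaledLdStOffset(*Unscaled));
//     Opc = *Unscaled; // e.g. STRXui -> STURXi
//   }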
230 /// Scaling factor for (scaled or unscaled) load or store.
231 static int getMemScale(unsigned Opc);
232 static int getMemScale(const MachineInstr &MI) {
233 return getMemScale(MI.getOpcode());
234 }
235
236 /// Returns whether the instruction is a pre-indexed load.
237 static bool isPreLd(const MachineInstr &MI);
238
239 /// Returns whether the instruction is a pre-indexed store.
240 static bool isPreSt(const MachineInstr &MI);
241
242 /// Returns whether the instruction is a pre-indexed load/store.
243 static bool isPreLdSt(const MachineInstr &MI);
244
245 /// Returns whether the instruction is a paired load/store.
246 static bool isPairedLdSt(const MachineInstr &MI);
247
248 /// Returns the base register operand of a load/store.
249 static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);
250
251 /// Returns the immediate offset operand of a load/store.
252 static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);
253
254 /// Returns whether the physical register is FP or NEON.
255 static bool isFpOrNEON(Register Reg);
256
257 /// Returns the shift amount operand of a load/store.
258 static const MachineOperand &getLdStAmountOp(const MachineInstr &MI);
259
260 /// Returns whether the instruction is FP or NEON.
261 static bool isFpOrNEON(const MachineInstr &MI);
262
263 /// Returns whether the instruction is in H form (16 bit operands)
264 static bool isHForm(const MachineInstr &MI);
265
266 /// Returns whether the instruction is in Q form (128 bit operands)
267 static bool isQForm(const MachineInstr &MI);
268
269 /// Returns whether the instruction can be compatible with non-zero BTYPE.
270 static bool hasBTISemantics(const MachineInstr &MI);
271
272 /// Returns the index for the immediate for a given instruction.
273 static unsigned getLoadStoreImmIdx(unsigned Opc);
274
275 /// Return true if the given load or store may be paired with another.
276 static bool isPairableLdStInst(const MachineInstr &MI);
277
278 /// Returns true if MI is one of the TCRETURN* instructions.
279 static bool isTailCallReturnInst(const MachineInstr &MI);
280
281 /// Return the opcode that sets flags when possible. The caller is
282 /// responsible for ensuring the opcode has a flag-setting equivalent.
283 static unsigned convertToFlagSettingOpc(unsigned Opc);
284
285 /// Return true if this is a load/store that can be potentially paired/merged.
286 bool isCandidateToMergeOrPair(const MachineInstr &MI) const;
287
288 /// Hint that pairing the given load or store is unprofitable.
289 static void suppressLdStPair(MachineInstr &MI);
290
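// Illustrative sketch (hypothetical TII, MI and PairingWouldBeSlow): a pass that
// decides a mergeable access should stay unpaired can record the hint and query
// it again later.
//
//   if (TII.isCandidateToMergeOrPair(MI) && PairingWouldBeSlow(MI))
//     AArch64InstrInfo::suppressLdStPair(MI);
//   ...
//   if (AArch64InstrInfo::isLdStPairSuppressed(MI))
//     ; // pairing was hinted to be unprofitable, so skip this access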
291 std::optional<ExtAddrMode>
292 getAddrModeFromMemoryOp(const MachineInstr &MemI,
293 const TargetRegisterInfo *TRI) const override;
294
295 bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
296 const MachineInstr &AddrI,
297 ExtAddrMode &AM) const override;
298
299 MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
300 const ExtAddrMode &AM) const override;
301
302 bool getMemOperandsWithOffsetWidth(
303 const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
304 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
305 const TargetRegisterInfo *TRI) const override;
306
307 /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
308 /// This is true for some SVE instructions like ldr/str that have a
309 /// 'reg + imm' addressing mode where the immediate is an index to the
310 /// scalable vector located at 'reg + imm * vscale x #bytes'.
311 bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
312 const MachineOperand *&BaseOp,
313 int64_t &Offset, bool &OffsetIsScalable,
314 TypeSize &Width,
315 const TargetRegisterInfo *TRI) const;
316
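// Illustrative sketch (hypothetical TII, MI and TRI locals): interpreting the
// scalable-offset case described above.
//
//   const MachineOperand *BaseOp = nullptr;
//   int64_t Offset = 0;
//   bool OffsetIsScalable = false;
//   TypeSize Width = TypeSize::getFixed(0);
//   if (TII.getMemOperandWithOffsetWidth(MI, BaseOp, Offset, OffsetIsScalable,
//                                        Width, &TRI)) {
//     // If OffsetIsScalable is true, the effective address is
//     // BaseOp->getReg() + Offset * vscale x #bytes; otherwise it is
//     // BaseOp->getReg() + Offset bytes.
//   }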
317 /// Return the immediate offset of the base register in a load/store \p LdSt.
318 MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
319
320 /// Returns true if opcode \p Opc is a memory operation. If it is, set
321 /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
322 ///
323 /// For unscaled instructions, \p Scale is set to 1. All values are in bytes.
324 /// MinOffset/MaxOffset are the un-scaled limits of the immediate in the
325 /// instruction, the actual offset limit is [MinOffset*Scale,
326 /// MaxOffset*Scale].
327 static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
328 int64_t &MinOffset, int64_t &MaxOffset);
329
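// Illustrative sketch (AArch64::LDRXui is assumed only as an example opcode):
// turning the unscaled limits described above into a byte-offset range.
//
//   TypeSize Scale = TypeSize::getFixed(0), Width = TypeSize::getFixed(0);
//   int64_t MinOffset = 0, MaxOffset = 0;
//   if (AArch64InstrInfo::getMemOpInfo(AArch64::LDRXui, Scale, Width,
//                                      MinOffset, MaxOffset)) {
//     // Legal immediate byte offsets lie in
//     // [MinOffset * Scale, MaxOffset * Scale].
//   }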
330 bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
331 int64_t Offset1, bool OffsetIsScalable1,
332 ArrayRef<const MachineOperand *> BaseOps2,
333 int64_t Offset2, bool OffsetIsScalable2,
334 unsigned ClusterSize,
335 unsigned NumBytes) const override;
336
337 void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
338 const DebugLoc &DL, MCRegister DestReg,
339 MCRegister SrcReg, bool KillSrc, unsigned Opcode,
340 llvm::ArrayRef<unsigned> Indices) const;
341 void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
342 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
343 bool KillSrc, unsigned Opcode, unsigned ZeroReg,
344 llvm::ArrayRef<unsigned> Indices) const;
345 void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
346 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
347 bool KillSrc, bool RenamableDest = false,
348 bool RenamableSrc = false) const override;
349
350 void storeRegToStackSlot(MachineBasicBlock &MBB,
351 MachineBasicBlock::iterator MBBI, Register SrcReg,
352 bool isKill, int FrameIndex,
353 const TargetRegisterClass *RC,
354 const TargetRegisterInfo *TRI,
355 Register VReg) const override;
356
357 void loadRegFromStackSlot(MachineBasicBlock &MBB,
358 MachineBasicBlock::iterator MBBI, Register DestReg,
359 int FrameIndex, const TargetRegisterClass *RC,
360 const TargetRegisterInfo *TRI,
361 Register VReg) const override;
362
363 // This tells target independent code that it is okay to pass instructions
364 // with subreg operands to foldMemoryOperandImpl.
365 bool isSubregFoldable() const override { return true; }
366
368 MachineInstr *
369 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
370 ArrayRef<unsigned> Ops,
371 MachineBasicBlock::iterator InsertPt, int FrameIndex,
372 LiveIntervals *LIS = nullptr,
373 VirtRegMap *VRM = nullptr) const override;
374
375 /// \returns true if a branch with opcode \p BranchOpc is capable of jumping
376 /// to a position \p BrOffset bytes away.
377 bool isBranchOffsetInRange(unsigned BranchOpc,
378 int64_t BrOffset) const override;
379
380 MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
381
382 void insertIndirectBranch(MachineBasicBlock &MBB,
383 MachineBasicBlock &NewDestBB,
384 MachineBasicBlock &RestoreBB, const DebugLoc &DL,
385 int64_t BrOffset, RegScavenger *RS) const override;
386
387 bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
388 MachineBasicBlock *&FBB,
389 SmallVectorImpl<MachineOperand> &Cond,
390 bool AllowModify = false) const override;
391 bool analyzeBranchPredicate(MachineBasicBlock &MBB,
392 MachineBranchPredicate &MBP,
393 bool AllowModify) const override;
394 unsigned removeBranch(MachineBasicBlock &MBB,
395 int *BytesRemoved = nullptr) const override;
396 unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
397 MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
398 const DebugLoc &DL,
399 int *BytesAdded = nullptr) const override;
400
401 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
402 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
403
404 bool
405 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
406 bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
407 Register, Register, Register, int &, int &,
408 int &) const override;
409 void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
410 const DebugLoc &DL, Register DstReg,
411 ArrayRef<MachineOperand> Cond, Register TrueReg,
412 Register FalseReg) const override;
413
414 void insertNoop(MachineBasicBlock &MBB,
415 MachineBasicBlock::iterator MI) const override;
416
417 MCInst getNop() const override;
418
419 bool isSchedulingBoundary(const MachineInstr &MI,
420 const MachineBasicBlock *MBB,
421 const MachineFunction &MF) const override;
422
423 /// analyzeCompare - For a comparison instruction, return the source registers
424 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
425 /// Return true if the comparison instruction can be analyzed.
426 bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
427 Register &SrcReg2, int64_t &CmpMask,
428 int64_t &CmpValue) const override;
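// Illustrative sketch (hypothetical TII and MI locals): pulling a compare apart
// before trying to optimize it, as described above.
//
//   Register SrcReg, SrcReg2;
//   int64_t CmpMask = 0, CmpValue = 0;
//   if (TII.analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue)) {
//     // SrcReg (and SrcReg2, if valid) are the compared registers and
//     // CmpValue holds the immediate of a register/immediate compare.
//   }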
429 /// optimizeCompareInstr - Convert the instruction supplying the argument to
430 /// the comparison into one that sets the zero bit in the flags register.
431 bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
432 Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
433 const MachineRegisterInfo *MRI) const override;
434 bool optimizeCondBranch(MachineInstr &MI) const override;
435
436 CombinerObjective getCombinerObjective(unsigned Pattern) const override;
437 /// Return true when a code sequence can improve throughput. It
438 /// should be called only for instructions in loops.
439 /// \param Pattern - combiner pattern
440 bool isThroughputPattern(unsigned Pattern) const override;
441 /// Return true when there is potentially a faster code sequence
442 /// for an instruction chain ending in ``Root``. All potential patterns are
443 /// listed in the ``Patterns`` array.
444 bool getMachineCombinerPatterns(MachineInstr &Root,
445 SmallVectorImpl<unsigned> &Patterns,
446 bool DoRegPressureReduce) const override;
447 /// Return true when Inst is associative and commutative so that it can be
448 /// reassociated. If Invert is true, then the inverse of Inst operation must
449 /// be checked.
450 bool isAssociativeAndCommutative(const MachineInstr &Inst,
451 bool Invert) const override;
452 /// When getMachineCombinerPatterns() finds patterns, this function generates
453 /// the instructions that could replace the original code sequence
454 void genAlternativeCodeSequence(
455 MachineInstr &Root, unsigned Pattern,
456 SmallVectorImpl<MachineInstr *> &InsInstrs,
457 SmallVectorImpl<MachineInstr *> &DelInstrs,
458 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
459 /// AArch64 supports MachineCombiner.
460 bool useMachineCombiner() const override;
461
462 bool expandPostRAPseudo(MachineInstr &MI) const override;
463
464 std::pair<unsigned, unsigned>
465 decomposeMachineOperandsTargetFlags(unsigned TF) const override;
472
473 bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
474 bool OutlineFromLinkOnceODRs) const override;
475 std::optional<std::unique_ptr<outliner::OutlinedFunction>>
476 getOutliningCandidateInfo(
477 const MachineModuleInfo &MMI,
478 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
479 unsigned MinRepeats) const override;
480 void mergeOutliningCandidateAttributes(
481 Function &F, std::vector<outliner::Candidate> &Candidates) const override;
482 outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI,
483 MachineBasicBlock::iterator &MIT,
484 unsigned Flags) const override;
485 SmallVector<
486 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
487 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
488 void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
489 const outliner::OutlinedFunction &OF) const override;
490 MachineBasicBlock::iterator
491 insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
492 MachineBasicBlock::iterator &It, MachineFunction &MF,
493 outliner::Candidate &C) const override;
494 bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
495
496 void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
497 MachineBasicBlock::iterator Iter, DebugLoc &DL,
498 bool AllowSideEffects = true) const override;
499
500 /// Returns the vector element size (B, H, S or D) of an SVE opcode.
501 uint64_t getElementSizeForOpcode(unsigned Opc) const;
502 /// Returns true if the opcode is for an SVE instruction that sets the
503 /// condition codes as if its results had been fed to a PTEST instruction
504 /// along with the same general predicate.
505 bool isPTestLikeOpcode(unsigned Opc) const;
506 /// Returns true if the opcode is for an SVE WHILE## instruction.
507 bool isWhileOpcode(unsigned Opc) const;
508 /// Returns true if the instruction has a shift by immediate that can be
509 /// executed in one cycle less.
510 static bool isFalkorShiftExtFast(const MachineInstr &MI);
511 /// Return true if the instruction is an SEH instruction used for unwinding
512 /// on Windows.
513 static bool isSEHInstruction(const MachineInstr &MI);
514
515 std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
516 Register Reg) const override;
517
518 bool isFunctionSafeToSplit(const MachineFunction &MF) const override;
519
520 bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;
521
522 std::optional<ParamLoadedValue>
523 describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
524
525 unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;
526
527 bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
528 MachineRegisterInfo &MRI) const override;
529
530 static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
531 int64_t &NumBytes,
532 int64_t &NumPredicateVectors,
533 int64_t &NumDataVectors);
534 static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
535 int64_t &ByteSized,
536 int64_t &VGSized);
537
538 // Return true if address of the form BaseReg + Scale * ScaledReg + Offset can
539 // be used for a load/store of NumBytes. BaseReg is always present and
540 // implicit.
541 bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
542 unsigned Scale) const;
543
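// Illustrative sketch (hypothetical TII local): checking whether an 8-byte
// access of the form BaseReg + 8 * ScaledReg with no immediate is expressible,
// e.g. ldr x0, [x1, x2, lsl #3].
//
//   bool Legal = TII.isLegalAddressingMode(/*NumBytes=*/8, /*Offset=*/0,
//                                          /*Scale=*/8);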
544 // Decrement the SP, issuing probes along the way. `TargetReg` is the new top
545 // of the stack. `FrameSetup` is passed as true, if the allocation is a part
546 // of constructing the activation frame of a function.
547 MachineBasicBlock::iterator probedStackAlloc(MachineBasicBlock::iterator MBBI,
548 Register TargetReg,
549 bool FrameSetup) const;
550
551#define GET_INSTRINFO_HELPER_DECLS
552#include "AArch64GenInstrInfo.inc"
553
554protected:
555 /// If the specific machine instruction is an instruction that moves/copies
556 /// a value from one register to another, return the destination and source
557 /// registers as machine operands.
558 std::optional<DestSourcePair>
559 isCopyInstrImpl(const MachineInstr &MI) const override;
560 std::optional<DestSourcePair>
561 isCopyLikeInstrImpl(const MachineInstr &MI) const override;
562
563private:
564 unsigned getInstBundleLength(const MachineInstr &MI) const;
565
566 /// Sets the offsets on outlined instructions in \p MBB which use SP
567 /// so that they will be valid post-outlining.
568 ///
569 /// \param MBB A \p MachineBasicBlock in an outlined function.
570 void fixupPostOutline(MachineBasicBlock &MBB) const;
571
572 void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
573 MachineBasicBlock *TBB,
574 ArrayRef<MachineOperand> Cond) const;
575 bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
576 const MachineRegisterInfo &MRI) const;
577 bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
578 int CmpValue, const MachineRegisterInfo &MRI) const;
579
580 /// Returns an unused general-purpose register which can be used for
581 /// constructing an outlined call if one exists. Returns 0 otherwise.
582 Register findRegisterToSaveLRTo(outliner::Candidate &C) const;
583
584 /// Remove a ptest of a predicate-generating operation that already sets, or
585 /// can be made to set, the condition codes in an identical manner.
586 bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
587 unsigned PredReg,
588 const MachineRegisterInfo *MRI) const;
589 std::optional<unsigned>
590 canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
591 MachineInstr *Pred, const MachineRegisterInfo *MRI) const;
592
593 /// verifyInstruction - Perform target specific instruction verification.
594 bool verifyInstruction(const MachineInstr &MI,
595 StringRef &ErrInfo) const override;
596};
597
598struct UsedNZCV {
599 bool N = false;
600 bool Z = false;
601 bool C = false;
602 bool V = false;
603
604 UsedNZCV() = default;
605
606 UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
607 this->N |= UsedFlags.N;
608 this->Z |= UsedFlags.Z;
609 this->C |= UsedFlags.C;
610 this->V |= UsedFlags.V;
611 return *this;
612 }
613};
614
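// Illustrative sketch: accumulating the flags read by two hypothetical users of
// a compare.
//
//   UsedNZCV Used;        // all four flags start out false
//   UsedNZCV FromCSEL;    // flags read by the first user (assumed)
//   FromCSEL.Z = true;
//   UsedNZCV FromBcc;     // flags read by the second user (assumed)
//   FromBcc.C = true;
//   Used |= FromCSEL;
//   Used |= FromBcc;      // Used.Z and Used.C are now true; N and V stay false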
615/// \returns the condition flags used after \p CmpInstr in its MachineBB if the
616/// NZCV flags are not live in successors of the common \p CmpInstr and \p MI
617/// parent. \returns std::nullopt otherwise.
618///
619/// Collects the instructions using those flags in \p CCUseInstrs if provided.
620std::optional<UsedNZCV>
621examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
622 const TargetRegisterInfo &TRI,
623 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
624
625/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
626/// which either reads or clobbers NZCV.
627bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
628 const MachineInstr &UseMI,
629 const TargetRegisterInfo *TRI);
630
631MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
632 unsigned Reg, const StackOffset &Offset,
633 bool LastAdjustmentWasScalable = true);
634MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
635 const StackOffset &OffsetFromDefCFA);
636
637/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
638/// plus Offset. This is intended to be used from within the prolog/epilog
639/// insertion (PEI) pass, where a virtual scratch register may be allocated
640/// if necessary, to be replaced by the scavenger at the end of PEI.
641void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
642 const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
643 StackOffset Offset, const TargetInstrInfo *TII,
644 MachineInstr::MIFlag = MachineInstr::NoFlags,
645 bool SetNZCV = false, bool NeedsWinCFI = false,
646 bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
647 StackOffset InitialOffset = {},
648 unsigned FrameReg = AArch64::SP);
649
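// Illustrative sketch (hypothetical MBB, MBBI, DL and TII locals): materializing
// "SP = SP - 64 bytes" at an insertion point, roughly as frame lowering would.
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset::getFixed(-64), TII,
//                   MachineInstr::FrameSetup);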
650/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
651/// FP. Return false if the offset could not be handled directly in MI, and
652/// return the left-over portion by reference.
653bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
654 unsigned FrameReg, StackOffset &Offset,
655 const AArch64InstrInfo *TII);
656
657/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
658enum AArch64FrameOffsetStatus {
659 AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
660 AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
661 AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
662};
663
664/// Check if the @p Offset is a valid frame offset for @p MI.
665/// The returned value reports the validity of the frame offset for @p MI.
666/// It uses the values defined by AArch64FrameOffsetStatus for that.
667/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
668/// use an offset.
669/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
670/// rewritten in @p MI.
671/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
672/// amount that is off the limit of the legal offset.
673/// If set, @p OutUseUnscaledOp will contain whether @p MI should be turned
674/// into an unscaled operation, whose opcode is in @p OutUnscaledOp.
675/// If set, @p EmittableOffset contains the amount that can be set in @p MI
676/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
677/// is a legal offset.
678int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
679 bool *OutUseUnscaledOp = nullptr,
680 unsigned *OutUnscaledOp = nullptr,
681 int64_t *EmittableOffset = nullptr);
682
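// Illustrative sketch (hypothetical MI whose frame offset is out of range):
// interpreting the status bits documented above.
//
//   StackOffset Offset = StackOffset::getFixed(4096); // assumed residual offset
//   bool UseUnscaledOp = false;
//   unsigned UnscaledOp = 0;
//   int64_t Emittable = 0;
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
//                                          &UnscaledOp, &Emittable);
//   if (Status & AArch64FrameOffsetCanUpdate) {
//     // Emittable can be folded into MI (switching to UnscaledOp if
//     // UseUnscaledOp is set); Offset now holds any left-over amount.
//   }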
683static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
684
685static inline bool isCondBranchOpcode(int Opc) {
686 switch (Opc) {
687 case AArch64::Bcc:
688 case AArch64::CBZW:
689 case AArch64::CBZX:
690 case AArch64::CBNZW:
691 case AArch64::CBNZX:
692 case AArch64::TBZW:
693 case AArch64::TBZX:
694 case AArch64::TBNZW:
695 case AArch64::TBNZX:
696 return true;
697 default:
698 return false;
699 }
700}
701
702static inline bool isIndirectBranchOpcode(int Opc) {
703 switch (Opc) {
704 case AArch64::BR:
705 case AArch64::BRAA:
706 case AArch64::BRAB:
707 case AArch64::BRAAZ:
708 case AArch64::BRABZ:
709 return true;
710 }
711 return false;
712}
713
714static inline bool isPTrueOpcode(unsigned Opc) {
715 switch (Opc) {
716 case AArch64::PTRUE_B:
717 case AArch64::PTRUE_H:
718 case AArch64::PTRUE_S:
719 case AArch64::PTRUE_D:
720 return true;
721 default:
722 return false;
723 }
724}
725
726/// Return opcode to be used for indirect calls.
727unsigned getBLRCallOpcode(const MachineFunction &MF);
728
729/// Return XPAC opcode to be used for a ptrauth strip using the given key.
730static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
731 using namespace AArch64PACKey;
732 switch (K) {
733 case IA: case IB: return AArch64::XPACI;
734 case DA: case DB: return AArch64::XPACD;
735 }
736 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
737}
738
739/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
740/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
741static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
742 using namespace AArch64PACKey;
743 switch (K) {
744 case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
745 case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
746 case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
747 case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
748 }
749 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
750}
751
752/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
753/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
754static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
755 using namespace AArch64PACKey;
756 switch (K) {
757 case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
758 case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
759 case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
760 case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
761 }
762 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
763}
764
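// Illustrative sketch: selecting ptrauth opcodes for the IA key with the helpers
// above.
//
//   unsigned SignOpc = getPACOpcodeForKey(AArch64PACKey::IA, /*Zero=*/false);
//   // SignOpc == AArch64::PACIA
//   unsigned AuthOpc = getAUTOpcodeForKey(AArch64PACKey::IA, /*Zero=*/true);
//   // AuthOpc == AArch64::AUTIZA
//   unsigned StripOpc = getXPACOpcodeForKey(AArch64PACKey::IA);
//   // StripOpc == AArch64::XPACI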
765// struct TSFlags {
766#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X) // 3-bits
767#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
768#define TSFLAG_FALSE_LANE_TYPE(X) ((X) << 7) // 2-bits
769#define TSFLAG_INSTR_FLAGS(X) ((X) << 9) // 2-bits
770#define TSFLAG_SME_MATRIX_TYPE(X) ((X) << 11) // 3-bits
771// }
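// Illustrative sketch (field values are placeholders): the macros above shift
// each field into its own bit range, so a TSFlags word is just the OR of the
// encoded fields.
//
//   uint64_t Example = TSFLAG_ELEMENT_SIZE_TYPE(0x2) |
//                      TSFLAG_DESTRUCTIVE_INST_TYPE(0x1) |
//                      TSFLAG_INSTR_FLAGS(0x1);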
772
773namespace AArch64 {
774
782};
783
796};
797
802};
803
804// NOTE: This is a bit field.
807
817};
818
819#undef TSFLAG_ELEMENT_SIZE_TYPE
820#undef TSFLAG_DESTRUCTIVE_INST_TYPE
821#undef TSFLAG_FALSE_LANE_TYPE
822#undef TSFLAG_INSTR_FLAGS
823#undef TSFLAG_SME_MATRIX_TYPE
824
828
830}
831
832} // end namespace llvm
833
834#endif