//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

// For predicated nodes where the result is a vector, the operation is
// controlled by a governing predicate and the inactive lanes are explicitly
// defined with a value, please stick to the following naming convention:
//
// _MERGE_OP<n>     The result value is a vector with inactive lanes equal
//                  to source operand OP<n>.
//
// _MERGE_ZERO      The result value is a vector with inactive lanes
//                  actively zeroed.
//
// _MERGE_PASSTHRU  The result value is a vector with inactive lanes equal
//                  to the last source operand, whose only purpose is to be
//                  a passthru value.
//
// For other cases where no explicit action is needed to set the inactive lanes,
// or when the result is not a vector and it is needed or helpful to
// distinguish a node from similar unpredicated nodes, use:
//
// _PRED
//
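// For example, FADD_PRED names a predicated floating-point add whose inactive
// lanes are undefined, and ABS_MERGE_PASSTHRU carries a trailing passthru
// operand that supplies its inactive lanes.
//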
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Pseudo for an ObjC call that gets emitted together with a special `mov
  // x29, x29` marker instruction.
  CALL_RVMARKER,

  CALL_BTI, // Function call followed by a BTI instruction.

  // Function call, authenticating the callee value first:
  // AUTH_CALL chain, callee, auth key #, int disc, addr disc, operands.
  AUTH_CALL,
  // AUTH_TC_RETURN chain, callee, fpdiff, auth key #, int disc, addr disc,
  // operands.
  AUTH_TC_RETURN,

  // Authenticated variant of CALL_RVMARKER.
  AUTH_CALL_RVMARKER,
  // A call with the callee in x16, i.e. "blr x16".
  CALL_ARM64EC_TO_X64,

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_GLUE, // Return with a glue operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // To avoid stack clash, allocation is performed by block and each block is
  // probed.
  PROBED_ALLOCA,

  // Predicated instructions where inactive lanes produce undefined results.

  // Unpredicated vector instructions

  // Predicated instructions with the result of inactive lanes provided by the
  // last operand.

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,
  DUPLANE128,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bitwise select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSP,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,
  SPLICE,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)

  // Vector narrowing shift by immediate (bottom)

  // Vector shift by constant and insert
  VSLI,
  VSRI,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Round wide FP to narrow FP with inexact results to odd.
  FCVTXN,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Unsigned sum Long across Vector
  UADDLV,
  SADDLV,

  // Wide adds

  // Add Pairwise of two vectors
  ADDP,
  // Add Long Pairwise
  SADDLP,
  UADDLP,

  // udot/sdot/usdot instructions
  SDOT,
  UDOT,
  USDOT,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMAXV,
  SMINV,
  UMAXV,
  UMINV,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  MRS, // MRS, also sets the flags via a glue.

  // Reciprocal estimates and steps.
  FRECPE,
  FRECPS,
  FRSQRTE,
  FRSQRTS,

  // Floating-point reductions.
  FADDA_PRED,
  FADDV_PRED,
  FMAXV_PRED,
  FMAXNMV_PRED,
  FMINV_PRED,
  FMINNMV_PRED,

  // Cast between vectors of the same element type but differ in length.
  REINTERPRET_CAST,

  // Nodes to build an LD64B / ST64B 64-bit quantity out of i64, and vice versa
  LS64_BUILD,
  LS64_EXTRACT,

  // Structured loads.
  SVE_LD2_MERGE_ZERO,
  SVE_LD3_MERGE_ZERO,
  SVE_LD4_MERGE_ZERO,

  // Unsigned gather loads.

  // Signed gather loads.

  // Unsigned gather loads (first-faulting variants).

  // Signed gather loads (first-faulting variants).

  // Non-temporal gather loads

  // Contiguous masked store.

  // Scatter store

  // Non-temporal scatter store

  // SME

  // Needed for __arm_agnostic("sme_za_state")

  // Asserts that a function argument (i32) is zero-extended to i8 by
  // the caller
  ASSERT_ZEXT_BOOL,

  // 128-bit system register accesses
  // lo64, hi64, chain = MRRS(chain, sysregname)
  MRRS,
  // chain = MSRR(chain, sysregname, lo64, hi64)
  MSRR,

  // Strict (exception-raising) floating point comparison
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // NEON Load/Store with post-increment base updates

  // SME ZA loads and stores
  SME_ZA_LDR,
  SME_ZA_STR,
};

} // end namespace AArch64ISD

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;
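
// For example (a sketch, not part of this header; Fpcr is a hypothetical
// variable holding the FPCR value read via MRS):
//   Rounding Mode = static_cast<Rounding>((Fpcr >> RoundingBitsPos) & rmMask);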

// Reserved bits should be preserved when modifying FPCR.
const uint64_t ReservedFPControlBits = 0xfffffffff80040f8;

// Registers used to pass function arguments.
ArrayRef<MCPhysReg> getGPRArgRegs();
ArrayRef<MCPhysReg> getFPRArgRegs();

/// Maximum allowed number of unprobed bytes above SP at an ABI
/// boundary.
const unsigned StackProbeMaxUnprobedStack = 1024;

/// Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxLoopUnroll = 4;
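
// For example (a sketch of the intent; the probe interval is configurable and
// the actual policy lives in frame lowering): a constant-size allocation
// spanning at most StackProbeMaxLoopUnroll probe intervals may be probed with
// a short unrolled sequence instead of a probing loop.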

} // namespace AArch64

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Similar to isShuffleMaskLegal. Return true if the given 'select with zero'
  /// shuffle mask can be codegen'd directly.
  bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitDynamicProbedAlloc(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                 MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Opcode, bool Op0IsDef) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitInitTPIDR2Object(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateZABuffer(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateSMESaveBuffer(MachineInstr &MI,
                                               MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitGetSMESaveSize(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool shouldRemoveRedundantExtend(SDValue Op) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool optimizeExtendOrTruncateConversion(
      Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
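  // For example, factor 4 corresponds to NEON's ld4/st4 structured
  // load/store instructions, the widest interleaving these hooks generate.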

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool lowerDeinterleaveIntrinsicToLoad(
      IntrinsicInst *DI, LoadInst *LI,
      SmallVectorImpl<Instruction *> &DeadInsts) const override;

  bool lowerInterleaveIntrinsicToStore(
      IntrinsicInst *II, StoreInst *SI,
      SmallVectorImpl<Instruction *> &DeadInsts) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalAddScalableImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                         int64_t MaxOffset) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOptLevel OptLevel) const override;

  /// Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type.
  bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                            EVT VT) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool isOpSuitableForLSE128(const Instruction *I) const;
  bool isOpSuitableForRCPC3(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode(const Module &M) const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    TypeSize TS = VT.getSizeInBits();
    // TODO: We should be able to use bic/bif too for SVE.
    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
  }

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, which is what SXT
    // supports. XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
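
  // For example, keeping 32 bits of an i64 passes the check above, since the
  // sign-extension back from i32 maps onto a single SXTW.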

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool shouldExpandCmpUsingSelects(EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  bool supportPtrAuthBundles() const override { return true; }

  bool supportKCFIBundles() const override { return true; }

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }
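
  // For example (a sketch of a caller; names are illustrative): frame
  // lowering may let a leaf function use up to
  //   TLI.getRedZoneSize(MF.getFunction())
  // bytes below SP without adjusting the stack pointer.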

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;
  EVT getPromotedVTForPredicate(EVT VT) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

  bool
  shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override;

  /// If a change in streaming mode is required on entry to/return from a
  /// function call it emits and returns the corresponding SMSTART or SMSTOP
  /// node. \p Condition should be one of the enum values from
  /// AArch64SME::ToggleCondition.
  SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable,
                              SDValue Chain, SDValue InGlue, unsigned Condition,
                              SDValue PStateSM = SDValue()) const;

  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }

  // Normally SVE is only used for byte size vectors that do not fit within a
  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64bit and 128bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

#ifndef NDEBUG
  void verifyTargetSDNode(const SDNode *N) const override;
#endif

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  llvm::BumpPtrAllocator BumpAlloc;
  llvm::StringSaver Saver{BumpAlloc};

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRType(MVT VT);
  void addQRType(MVT VT);

  bool shouldExpandBuildVectorWithShuffles(EVT, unsigned) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal, bool RequiresSMChange) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRIND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_HISTOGRAM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  /// Handle Lowering flag assembly outputs.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  // 1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //    is not universally useable.
  // 2. Most unpacked integer types are not legal and thus integer extends
  //    cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;
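  // For example (illustrative types): nxv2i32 (unpacked) and nxv4i32 (packed)
  // share the i32 element type, so moving between them goes through
  // REINTERPRET_CAST, while a same-width cast such as nxv2i64 <-> nxv4i32 can
  // use a plain BITCAST.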

  // Returns the runtime value for PSTATE.SM by generating a call to
  // __arm_sme_state.
  SDValue getRuntimePStateSM(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                             EVT VT) const;

  bool preferScalarizeSplat(SDNode *N) const override;

  unsigned getMinimumJumpTableEntries() const override;

  bool softPromoteHalfType() const override { return true; }

  bool shouldScalarizeBinop(SDValue VecOp) const override {
    return VecOp.getOpcode() == ISD::SETCC;
  }
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
uint64_t Addr
uint32_t Index
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
static cl::opt< RegAllocEvictionAdvisorAnalysis::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development, "development", "for training")))
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, StoreInst *SI, SmallVectorImpl< Instruction * > &DeadInsts) const override
Lower an interleave intrinsic to a target specific store intrinsic.
bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override
Return true if the @llvm.experimental.vector.partial.reduce.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const override
Return the prefered common base offset.
bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI, LoadInst *LI, SmallVectorImpl< Instruction * > &DeadInsts) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
MachineBasicBlock * EmitInitTPIDR2Object(MachineInstr &MI, MachineBasicBlock *BB) const
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool shouldExpandCmpUsingSelects(EVT VT) const override
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool supportPtrAuthBundles() const override
Return true if the target supports ptrauth operand bundles.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool fallBackToDAGISel(const Instruction &Inst) const override
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
bool isLegalAddScalableImmediate(int64_t) const override
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
MachineBasicBlock * EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, bool Op0IsDef) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
void verifyTargetSDNode(const SDNode *N) const override
Check the given SDNode. Aborts if it is invalid.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
MachineBasicBlock * EmitAllocateSMESaveBuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
MachineBasicBlock * EmitAllocateZABuffer(MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
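For example (my sketch), two adjacent 64-bit loads are the pattern this hook lets the combiner merge, matching AArch64's ldp:

#include <cstdint>
#include <utility>

// Loads from p[0] and p[1] are adjacent and naturally aligned, so
// AArch64 can typically service both with a single ldp instruction.
std::pair<uint64_t, uint64_t> loadPair(const uint64_t *p) {
  return {p[0], p[1]};
}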
bool isOpSuitableForLDPSTP(const Instruction *I) const
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
MachineBasicBlock * EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
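A simplified model (mine, not this header's code) of the AArch64 encoding rule: add/sub accept a 12-bit unsigned immediate, optionally shifted left by 12; the real hook additionally handles negative immediates by flipping to sub.

#include <cstdint>

bool addImmIsLegal(uint64_t Imm) {
  if (Imm <= 0xfff)
    return true;                                  // add xd, xn, #imm12
  if ((Imm & 0xfff) == 0 && (Imm >> 12) <= 0xfff)
    return true;                                  // add xd, xn, #imm12, lsl #12
  return false;                                   // must materialize the constant
}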
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
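The two checks being chosen between here are equivalent. A quick self-check (mine) for KeptBits = 8, assuming the usual arithmetic right shift on signed values:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned KeptBits = 8;
  for (int64_t i = INT32_MIN; i <= INT32_MAX; i += 9973) {
    int32_t x = (int32_t)i;
    // IR-optimal form: one add and one unsigned compare.
    bool addForm = (uint32_t)(x + (1u << (KeptBits - 1))) < (1u << KeptBits);
    // Traditional form: shift up, arithmetic-shift back, compare.
    int32_t sext = (int32_t)((uint32_t)x << (32 - KeptBits)) >> (32 - KeptBits);
    assert(addForm == (sext == x));
  }
}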
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached to a strict FP call.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
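A hedged usage sketch (the helper name and the TLI/DL/Ctx parameters are mine): querying whether a base register plus a 16-byte displacement is legal for an i64 access in address space 0. The AddrMode fields come from TargetLoweringBase::AddrMode.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static bool regPlus16IsLegal(const TargetLowering &TLI, const DataLayout &DL,
                             LLVMContext &Ctx) {
  TargetLowering::AddrMode AM;
  AM.HasBaseReg = true; // a base register is present
  AM.BaseOffs = 16;     // constant displacement
  AM.Scale = 0;         // no scaled index register
  return TLI.isLegalAddressingMode(DL, AM, Type::getInt64Ty(Ctx),
                                   /*AS=*/0);
}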
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
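Both hooks decline the commute when N is a bitfield extract, since a shift-and-mask like this (my example) is a single ubfx on AArch64:

#include <cstdint>

// (x >> 8) & 0xff extracts bits 15:8; AArch64 does this with one ubfx,
// so breaking the pattern apart by commuting the shift would be a loss.
uint32_t byte1(uint32_t x) { return (x >> 8) & 0xff; }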
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
MachineBasicBlock * EmitDynamicProbedAlloc(MachineInstr &MI, MachineBasicBlock *MBB) const
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, unsigned Condition, SDValue PStateSM=SDValue()) const
If a change in streaming mode is required on entry to/return from a function call, it emits and return...
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
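A scalar model (mine) of the intrinsic's semantics, whether it is expanded generically or matched to an SVE whilelo: lane i is active while base + i is below the trip count.

#include <cstdint>

void activeLaneMask(bool *mask, uint64_t base, uint64_t trip, unsigned lanes) {
  for (unsigned i = 0; i < lanes; ++i)
    mask[i] = base + i < trip; // unsigned compare, per the intrinsic
}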
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
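The rewrite itself is plain distribution, with c1*c2 folding to a constant at compile time; e.g. (my check):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t c1 = 7, c2 = 3;
  for (uint32_t x = 0; x < 1000; ++x)
    assert((x + c1) * c2 == x * c2 + c1 * c2); // holds even with wrap-around
}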
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition: APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
Allocate memory in an ever-growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string, and methods for querying it.
Definition: DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:176
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Definition: MachineInstr.h:69
Flags
Flags values. These may be or'd together.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
An instruction for storing to memory.
Definition: Instructions.h:292
Saves strings in the provided stable storage and returns a StringRef with a stable character pointer.
Definition: StringSaver.h:21
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
LegalizeTypeAction
This enum indicates whether a type is legal for a target, and if not, what action should be used to...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
@ NVCAST
Natural vector cast.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
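Since the mode lives in FPCR bits 23:22, extracting it is a shift and mask (my sketch; RoundingBitsPos below is that shift amount):

#include <cstdint>

uint32_t currentRoundingMode(uint64_t fpcr) {
  return (fpcr >> 22) & 0x3; // bits 23:22 of FPCR select the mode
}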
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
const uint64_t ReservedFPControlBits
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1490
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1551
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1602
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1582
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CombineLevel
Definition: DAGCombine.h:15
DWARFExpression::Operation Op
@ Enable
Enable colors.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168