//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

class AArch64TargetMachine;

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;

// Reserved bits should be preserved when modifying FPCR.
const uint64_t ReservedFPControlBits = 0xfffffffff80040f8;

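// For illustration only (a sketch using the constants above; the helper names
// are hypothetical and not part of this interface): the rounding mode can be
// decoded from a raw FPCR value with a shift and mask, and re-encoded while
// leaving all other bits, including the reserved ones, intact.
// \code
//   Rounding decodeRoundingMode(uint64_t FPCR) {
//     return static_cast<Rounding>((FPCR >> RoundingBitsPos) & rmMask);
//   }
//   uint64_t encodeRoundingMode(uint64_t FPCR, Rounding RM) {
//     uint64_t Mask = uint64_t(rmMask) << RoundingBitsPos;
//     return (FPCR & ~Mask) | (uint64_t(RM) << RoundingBitsPos);
//   }
// \endcode
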
// Registers used to pass function arguments.
ArrayRef<MCPhysReg> getGPRArgRegs();
ArrayRef<MCPhysReg> getFPRArgRegs();

/// Maximum allowed number of unprobed bytes above SP at an ABI
/// boundary.
const unsigned StackProbeMaxUnprobedStack = 1024;

/// Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxLoopUnroll = 4;
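
// Illustrative note (the probe interval is an assumption, not defined here):
// a constant-sized allocation spanning at most StackProbeMaxLoopUnroll probe
// intervals may be probed with a fully unrolled sequence, while larger
// allocations fall back to a probing loop; independently, up to
// StackProbeMaxUnprobedStack (1024) bytes above SP may remain unprobed at an
// ABI boundary.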

} // namespace AArch64

namespace ARM64AS {
enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
}

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  const AArch64TargetMachine &getTM() const;

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
      // These are 32-bit pointers created using the `__ptr32` extension or
      // similar. They are handled by marking them as being in a different
      // address space, and will be extended to 64-bits when used as the target
      // of a load or store operation, or cast to a 64-bit pointer type.
      return MVT::i32;
    } else {
      // Returning i64 unconditionally here (i.e. even for ILP32) means that the
      // *DAG* representation of pointers will always be 64-bits. They will be
      // truncated and extended when transferred to memory, but the 64-bit DAG
      // allows us to use AArch64's addressing modes much more easily.
      return MVT::i64;
    }
  }
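
  // Illustration of the expected results (`TLI` stands for a hypothetical
  // AArch64TargetLowering instance and `DL` for a DataLayout; neither is
  // defined here):
  // \code
  //   MVT P32 = TLI.getPointerTy(DL, ARM64AS::PTR32_UPTR); // MVT::i32
  //   MVT P64 = TLI.getPointerTy(DL);                      // MVT::i64
  // \endcode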

  unsigned getVectorIdxWidth(const DataLayout &DL) const override {
    // The VectorIdx type is i64 for both the normal (LP64) and ILP32 ABIs.
    return 64;
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *
  createFastISel(FunctionLoweringInfo &funcInfo,
                 const TargetLibraryInfo *libInfo,
                 const LibcallLoweringInfo *libcallLowering) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Similar to isShuffleMaskLegal. Return true if the given 'select with zero'
  /// shuffle mask can be codegen'd directly.
  bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitDynamicProbedAlloc(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitCheckMatchingVL(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                 MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Opcode, bool Op0IsDef) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

  // Note: The following group of functions are only used as part of the old SME
  // ABI lowering. They will be removed once -aarch64-new-sme-abi=true is the
  // default.
  MachineBasicBlock *EmitInitTPIDR2Object(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateZABuffer(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateSMESaveBuffer(MachineInstr &MI,
                                               MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitGetSMESaveSize(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitEntryPStateSM(MachineInstr &MI,
                                       MachineBasicBlock *BB) const;

  /// Replace (0, vreg) discriminator components with the operands of blend
  /// or with (immediate, NoRegister) when possible.
  void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB,
                                 MachineOperand &IntDiscOp,
                                 MachineOperand &AddrDiscOp,
                                 const TargetRegisterClass *AddrDiscRC) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
                             std::optional<unsigned> ByteOffset) const override;

  bool shouldRemoveRedundantExtend(SDValue Op) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool optimizeExtendOrTruncateConversion(
      Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices, unsigned Factor,
                            const APInt &GapMask) const override;
  bool lowerInterleavedStore(Instruction *Store, Value *Mask,
                             ShuffleVectorInst *SVI, unsigned Factor,
                             const APInt &GapMask) const override;

  bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask,
                                        IntrinsicInst *DI) const override;

  bool lowerInterleaveIntrinsicToStore(
      Instruction *Store, Value *Mask,
      ArrayRef<Value *> InterleaveValues) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalAddScalableImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                         int64_t MaxOffset) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOptLevel OptLevel) const override;

  /// Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type.
  bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (VT.isVector())
      return false;

    return VT.getScalarSizeInBits() <= 64;
  }

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                            unsigned SelectOpcode, SDValue X,
                                            SDValue Y) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }
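
  // For illustration: an overflow check such as `llvm.sadd.with.overflow.i32`
  // lowers naturally to a flag-setting ADDS followed by a branch or CSET on
  // the V flag, so forming the overflow op pays off even when only the
  // overflow result is used.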

  // Return true if the target wants to optimize the mul overflow intrinsic
  // for the given \p VT.
  bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context,
                                                 EVT VT) const override;

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool isOpSuitableForLSE128(const Instruction *I) const;
  bool isOpSuitableForRCPC3(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  bool shouldInsertTrailingSeqCstFenceForAtomicStore(
      const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode(const Module &M) const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB,
                         const LibcallLoweringInfo &Libcalls) const override;

  void
  insertSSPDeclarations(Module &M,
                        const LibcallLoweringInfo &Libcalls) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(
      IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    if (VT.isScalableVector())
      return true;

    return VT.getFixedSizeInBits() >= 64; // vector 'bic'
  }
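
  // For illustration: the scalar case maps to BIC/BICS (e.g. `x & ~y` becomes
  // `bic w0, w0, w1`), and NEON/SVE provide a vector BIC, hence the 64-bit
  // minimum for fixed-width vectors above.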

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
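
  // For illustration: a check like `x == sext(trunc x to i8)` has
  // KeptBits == 8, which is accepted above because SXTB/SXTH/SXTW can
  // materialise the sign-extended value directly; vectors are declined.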

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool preferSelectsOverBooleanArithmetic(EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  bool supportPtrAuthBundles() const override { return true; }

  bool supportKCFIBundles() const override { return true; }

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
    return true;
  }

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;
  EVT getPromotedVTForPredicate(EVT VT) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override;

  /// If a change in streaming mode is required on entry to/return from a
  /// function call it emits and returns the corresponding SMSTART or SMSTOP
  /// node. \p Condition should be one of the enum values from
  /// AArch64SME::ToggleCondition.
  SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable,
                              SDValue Chain, SDValue InGlue, unsigned Condition,
                              bool InsertVectorLengthCheck = false) const;

  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }

  // Normally SVE is only used for byte size vectors that do not fit within a
  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64bit and 128bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  /// In AArch64, true if FEAT_CPA is present. Allows pointer arithmetic
  /// semantics to be preserved for instruction selection.
  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRType(MVT VT);
  void addQRType(MVT VT);

  bool shouldExpandBuildVectorWithShuffles(EVT, unsigned) const override;

  SDValue lowerEHPadEntry(SDValue Chain, SDLoc const &DL,
                          SelectionDAG &DAG) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal, bool RequiresSMChange) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFMA(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal,
                         iterator_range<SDNode::user_iterator> Users,
                         SDNodeFlags Flags, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRIND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_HISTOGRAM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPARTIAL_REDUCE_MLA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ACTIVE_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOOP_DEPENDENCE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_MUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorCompressToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
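
  // Background note: the "Q" inline-asm constraint denotes a memory operand
  // addressed by a single base register with no offset, which is why it is
  // passed through unmodified above.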

  /// Handle Lowering flag assembly outputs.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;
  void ReplaceGetActiveLaneMaskResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           bool PoisonOnly, bool ConsiderFlags,
                                           unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  // 1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //    is not universally useable.
  // 2. Most unpacked integer types are not legal and thus integer extends
  //    cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;
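
  // One possible reading of the rules above (an assumed example, not taken
  // from the implementation): "bitcasting" nxv2f32 to nxv2i32 could proceed as
  // REINTERPRET_CAST nxv2f32 -> nxv4f32 (unpacked to packed, same element
  // type), BITCAST nxv4f32 -> nxv4i32 (equal bit lengths), then
  // REINTERPRET_CAST nxv4i32 -> nxv2i32 back to the unpacked form.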

  // Returns the runtime value for PSTATE.SM by generating a call to
  // __arm_sme_state.
  SDValue getRuntimePStateSM(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                             EVT VT) const;

  bool preferScalarizeSplat(SDNode *N) const override;

  unsigned getMinimumJumpTableEntries() const override;

  bool shouldScalarizeBinop(SDValue VecOp) const override {
    return VecOp.getOpcode() == ISD::SETCC;
  }

  bool hasMultipleConditionRegisters(EVT VT) const override {
    return VT.isScalableVector();
  }
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo,
                         const LibcallLoweringInfo *libcallLowering);
} // end namespace AArch64

} // end namespace llvm

#endif
unsigned const MachineRegisterInfo * MRI
return SDValue()
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
PowerPC Reduce CR logical Operation
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG)
Lower SRA_PARTS and friends, which return two i32 values and take a 2 x i32 value to shift plus a shi...
static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, unsigned Condition, bool InsertVectorLengthCheck=false) const
If a change in streaming mode is required on entry to/return from a function call it emits and return...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override
Return true if it is profitable to fold a pair of shifts into a mask.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const override
Return the prefered common base offset.
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
MachineBasicBlock * EmitInitTPIDR2Object(MachineInstr &MI, MachineBasicBlock *BB) const
bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved store into a stN intrinsic.
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool preferSelectsOverBooleanArithmetic(EVT VT) const override
Should we prefer selects to doing arithmetic on boolean types.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context, EVT VT) const override
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
bool shouldInsertTrailingSeqCstFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a seq_cst trailing fence without reducing the or...
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool supportPtrAuthBundles() const override
Return true if the target supports ptrauth operand bundles.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask, IntrinsicInst *DI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned getVectorIdxWidth(const DataLayout &DL) const override
Returns the type to be used for the index operand vector operations.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB, MachineOperand &IntDiscOp, MachineOperand &AddrDiscOp, const TargetRegisterClass *AddrDiscRC) const
Replace (0, vreg) discriminator components with the operands of blend or with (immediate,...
bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved load into a ldN intrinsic.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool fallBackToDAGISel(const Instruction &Inst) const override
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
bool isLegalAddScalableImmediate(int64_t) const override
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
MachineBasicBlock * EmitCheckMatchingVL(MachineInstr &MI, MachineBasicBlock *MBB) const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
MachineBasicBlock * EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, bool Op0IsDef) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
MachineBasicBlock * EmitEntryPStateSM(MachineInstr &MI, MachineBasicBlock *BB) const
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override
In AArch64, true if FEAT_CPA is present.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
MachineBasicBlock * EmitAllocateSMESaveBuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
MachineBasicBlock * EmitAllocateZABuffer(MachineInstr &MI, MachineBasicBlock *BB) const
const AArch64TargetMachine & getTM() const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool isOpSuitableForLDPSTP(const Instruction *I) const
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
MachineBasicBlock * EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const
bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached to a strict FP call.
bool lowerInterleaveIntrinsicToStore(Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const override
Lower an interleave intrinsic to a target specific store intrinsic.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
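For the immediate-offset component, AArch64 loads and stores accept either a 12-bit unsigned offset scaled by the access size (LDR/STR) or a 9-bit signed unscaled offset (LDUR/STUR). A minimal sketch of that part of the check (hypothetical helper, not the actual LLVM implementation):

#include <cassert>
#include <cstdint>

static bool isLegalLoadStoreOffsetSketch(int64_t Offset, int64_t AccessBytes) {
  // LDR/STR: 12-bit unsigned offset, scaled by the access size.
  if (Offset % AccessBytes == 0 && Offset / AccessBytes >= 0 &&
      Offset / AccessBytes <= 4095)
    return true;
  // LDUR/STUR: 9-bit signed unscaled offset.
  return Offset >= -256 && Offset <= 255;
}

int main() {
  assert(isLegalLoadStoreOffsetSketch(32760, 8));  // ldr x0, [x1, #32760]
  assert(isLegalLoadStoreOffsetSketch(-256, 8));   // ldur x0, [x1, #-256]
  assert(!isLegalLoadStoreOffsetSketch(-264, 8));  // below unscaled range
  assert(!isLegalLoadStoreOffsetSketch(32761, 8)); // unscaled and too large
  return 0;
}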
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
MachineBasicBlock * EmitDynamicProbedAlloc(MachineInstr &MI, MachineBasicBlock *MBB) const
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
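A hedged sketch of the decision involved (local stand-in enum and hypothetical helper, not the actual implementation): subtargets with LSE atomics can typically keep a cmpxchg native and select a CAS instruction directly, while earlier cores expand it to a load-exclusive/store-exclusive retry loop.

#include <cassert>

enum class ExpansionKind { None, LLSC }; // local stand-in for the LLVM enum

static ExpansionKind shouldExpandCmpXchgSketch(bool HasLSE,
                                               unsigned SizeInBits) {
  if (HasLSE && SizeInBits <= 128)
    return ExpansionKind::None; // keep it native; select CAS/CASP directly
  return ExpansionKind::LLSC;   // expand to an LDXR/STXR retry loop
}

int main() {
  assert(shouldExpandCmpXchgSketch(true, 64) == ExpansionKind::None);
  assert(shouldExpandCmpXchgSketch(false, 64) == ExpansionKind::LLSC);
  return 0;
}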
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
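The underlying identity is distributivity: (x + c1)*c2 == x*c2 + c1*c2, so the transform pays off when the folded constant c1*c2 is still cheap to materialize. A trivial self-contained check (not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t c1 = 7, c2 = 3;
  for (int64_t x = -100; x <= 100; ++x)
    assert((x + c1) * c2 == x * c2 + c1 * c2); // folded constant: c1*c2 == 21
  return 0;
}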
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
CCState - This class holds information needed while lowering arguments and return values.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
MachineOperand class - Representation of each machine instruction operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
TargetLowering(const TargetLowering &)=delete
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
Base class of all SIMD vector types.
A range adaptor for a pair of iterators.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
const uint64_t ReservedFPControlBits
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition CallingConv.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:818
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
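A hedged sketch of a handwritten function with this shape (hypothetical; the real AArch64 CC_AArch64_* assigners are generated from tablegen). This toy handler places every value in an 8-byte stack slot:

#include "llvm/CodeGen/CallingConvLower.h"

using namespace llvm;

// Hypothetical handler: allocate an 8-byte stack slot for every value.
// (ArgFlags and OrigTy are unused in this toy example.)
static bool CC_AllStack_Sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
                               ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                               CCState &State) {
  int64_t Offset = State.AllocateStack(8, Align(8));
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false; // false signals the value was assigned successfully
}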
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
CombineLevel
Definition DAGCombine.h:15
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
Definition DWP.h:27
@ Enable
Enable colors.
Definition WithColor.h:47
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition ValueTypes.h:35
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:174
These are IR-level optimization flags that may be propagated to SDNodes.