//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

class AArch64TargetMachine;

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;

// Reserved bits should be preserved when modifying FPCR.
const uint64_t ReservedFPControlBits = 0xfffffffff80040f8;

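// Illustrative sketch (not part of the original header): user code can decode
// the current rounding mode from an FPCR value with the constants above,
// e.g.:
//
//   AArch64::Rounding decodeRM(uint64_t FPCR) {
//     return static_cast<AArch64::Rounding>(
//         (FPCR >> AArch64::RoundingBitsPos) & AArch64::rmMask);
//   }
//
// Installing a new mode clears bits 23:22 and ORs in the shifted enum value,
// leaving everything covered by ReservedFPControlBits untouched.
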
// Registers used to pass function arguments.
ArrayRef<MCPhysReg> getGPRArgRegs();
ArrayRef<MCPhysReg> getFPRArgRegs();

/// Maximum allowed number of unprobed bytes above SP at an ABI
/// boundary.
const unsigned StackProbeMaxUnprobedStack = 1024;

/// Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxLoopUnroll = 4;

} // namespace AArch64

namespace ARM64AS {
enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
}

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  const AArch64TargetMachine &getTM() const;

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
      // These are 32-bit pointers created using the `__ptr32` extension or
      // similar. They are handled by marking them as being in a different
      // address space, and will be extended to 64-bits when used as the target
      // of a load or store operation, or cast to a 64-bit pointer type.
      return MVT::i32;
    } else {
      // Returning i64 unconditionally here (i.e. even for ILP32) means that the
      // *DAG* representation of pointers will always be 64-bits. They will be
      // truncated and extended when transferred to memory, but the 64-bit DAG
      // allows us to use AArch64's addressing modes much more easily.
      return MVT::i64;
    }
  }

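  // Illustrative sketch (not part of the original header): a 32-bit pointer
  // created with the MSVC-style __ptr32 extension appears in IR with one of
  // the address spaces above, e.g.
  //
  //   %v = load i32, ptr addrspace(271) %p  ; PTR32_UPTR, zero-extended
  //   %w = load i32, ptr addrspace(270) %q  ; PTR32_SPTR, sign-extended
  //
  // while a plain `ptr` (PTR64) keeps the 64-bit DAG representation described
  // above.
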
  unsigned getVectorIdxWidth(const DataLayout &DL) const override {
    // The VectorIdx type is i64, for both the normal and the ILP32 ABI.
    return 64;
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Similar to isShuffleMaskLegal. Return true if the given 'select with zero'
  /// shuffle mask can be codegen'd directly.
  bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitDynamicProbedAlloc(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitCheckMatchingVL(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                 MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Opcode, bool Op0IsDef) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

  // Note: The following group of functions are only used as part of the old SME
  // ABI lowering. They will be removed once -aarch64-new-sme-abi=true is the
  // default.
  MachineBasicBlock *EmitInitTPIDR2Object(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateZABuffer(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateSMESaveBuffer(MachineInstr &MI,
                                               MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitGetSMESaveSize(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitEntryPStateSM(MachineInstr &MI,
                                       MachineBasicBlock *BB) const;

  /// Replace (0, vreg) discriminator components with the operands of blend
  /// or with (immediate, NoRegister) when possible.
  void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB,
                                 MachineOperand &IntDiscOp,
                                 MachineOperand &AddrDiscOp,
                                 const TargetRegisterClass *AddrDiscRC) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
                             std::optional<unsigned> ByteOffset) const override;

  bool shouldRemoveRedundantExtend(SDValue Op) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool optimizeExtendOrTruncateConversion(
      Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices, unsigned Factor,
                            const APInt &GapMask) const override;
  bool lowerInterleavedStore(Instruction *Store, Value *Mask,
                             ShuffleVectorInst *SVI, unsigned Factor,
                             const APInt &GapMask) const override;

  bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask,
                                        IntrinsicInst *DI) const override;

  bool lowerInterleaveIntrinsicToStore(
      Instruction *Store, Value *Mask,
      ArrayRef<Value *> InterleaveValues) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalAddScalableImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                         int64_t MaxOffset) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOptLevel OptLevel) const override;

  /// Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type.
  bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (VT.isVector())
      return false;

    return VT.getScalarSizeInBits() <= 64;
  }
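
  // Illustrative note (not part of the original header): returning true keeps
  // "x & (-1 << y)" in its two-shift form for scalars, e.g.
  //
  //   lsr x0, x0, x1
  //   lsl x0, x0, x1
  //
  // which clears the low y bits without first materialising the variable mask
  // in a register.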

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                            unsigned SelectOpcode, SDValue X,
                                            SDValue Y) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }
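
  // Illustrative note (not part of the original header): the overflow ops
  // referred to here are nodes like ISD::SADDO/ISD::UADDO, typically produced
  // from IR such as
  //
  //   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  //
  // which AArch64 selects to flag-setting arithmetic (e.g. ADDS followed by a
  // condition-code check), so forming them pays off even when only the
  // overflow bit is consumed.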

  // Return true if the target wants to optimize the mul overflow intrinsic
  // for the given \p VT.
  bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context,
                                                 EVT VT) const override;

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool isOpSuitableForLSE128(const Instruction *I) const;
  bool isOpSuitableForRCPC3(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode(const Module &M) const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    if (VT.isScalableVector())
      return true;

    return VT.getFixedSizeInBits() >= 64; // vector 'bic'
  }
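
  // Illustrative note (not part of the original header): advertising an
  // and-not operation lets combines keep (and x, (xor y, -1)) as one node,
  // which selects to BIC, e.g. "bic w0, w0, w1" for scalars or
  // "bic v0.16b, v0.16b, v1.16b" for 128-bit vectors.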

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are OK with KeptBitsVT being byte/word/dword, which is what SXT
    // supports. XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
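
  // Illustrative note (not part of the original header): for KeptBits == 8
  // this keeps a "fits in 8 bits" test in the form
  //
  //   (i64 (sext_inreg x, i8)) == x
  //
  // which selects to a compare with a sign-extended register operand
  // ("cmp x0, w0, sxtb") instead of the add-then-unsigned-compare rewrite.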

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool preferSelectsOverBooleanArithmetic(EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  bool supportPtrAuthBundles() const override { return true; }

  bool supportKCFIBundles() const override { return true; }

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
    return true;
  }

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags getTargetMMOFlags(
      const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }
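
  // Illustrative note (not part of the original header): the red zone is the
  // area immediately below SP that leaf functions may use without moving the
  // stack pointer; building with -mno-red-zone attaches the "noredzone" IR
  // attribute, which makes this return 0.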

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;
  EVT getPromotedVTForPredicate(EVT VT) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override;

  /// If a change in streaming mode is required on entry to/return from a
  /// function call it emits and returns the corresponding SMSTART or SMSTOP
  /// node. \p Condition should be one of the enum values from
  /// AArch64SME::ToggleCondition.
  SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable,
                              SDValue Chain, SDValue InGlue, unsigned Condition,
                              bool InsertVectorLengthCheck = false) const;

  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }

  // Normally SVE is only used for byte size vectors that do not fit within a
  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64-bit and 128-bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  /// In AArch64, true if FEAT_CPA is present. Allows pointer arithmetic
  /// semantics to be preserved for instruction selection.
  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRType(MVT VT);
  void addQRType(MVT VT);

  bool shouldExpandBuildVectorWithShuffles(EVT, unsigned) const override;

  SDValue lowerEHPadEntry(SDValue Chain, SDLoc const &DL,
                          SelectionDAG &DAG) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal, bool RequiresSMChange) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFMA(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal,
                         iterator_range<SDNode::user_iterator> Users,
                         SDNodeFlags Flags, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRIND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_HISTOGRAM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPARTIAL_REDUCE_MLA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ACTIVE_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOOP_DEPENDENCE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_MUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
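
  // Illustrative sketch (not part of the original header): "Q" constrains a
  // memory operand to a single base register with no offset, as required by
  // load-exclusive instructions, e.g.
  //
  //   asm volatile("ldxr %0, %1" : "=r"(val) : "Q"(*addr));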

  /// Handle Lowering flag assembly outputs.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;
  void ReplaceGetActiveLaneMaskResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           bool PoisonOnly, bool ConsiderFlags,
                                           unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally usable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;

  // Returns the runtime value for PSTATE.SM by generating a call to
  // __arm_sme_state.
  SDValue getRuntimePStateSM(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                             EVT VT) const;

  bool preferScalarizeSplat(SDNode *N) const override;

  unsigned getMinimumJumpTableEntries() const override;

  bool softPromoteHalfType() const override { return true; }

  bool shouldScalarizeBinop(SDValue VecOp) const override {
    return VecOp.getOpcode() == ISD::SETCC;
  }

  bool hasMultipleConditionRegisters(EVT VT) const override {
    return VT.isScalableVector();
  }
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif
unsigned const MachineRegisterInfo * MRI
return SDValue()
const TargetInstrInfo & TII
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
IRTranslator LLVM IR MI
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
PowerPC Reduce CR logical Operation
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG)
Lower SRA_PARTS and friends, which return two i32 values and take a 2 x i32 value to shift plus a shi...
static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, unsigned Condition, bool InsertVectorLengthCheck=false) const
If a change in streaming mode is required on entry to/return from a function call it emits and return...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override
Return true if it is profitable to fold a pair of shifts into a mask.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const override
Return the prefered common base offset.
bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
MachineBasicBlock * EmitInitTPIDR2Object(MachineInstr &MI, MachineBasicBlock *BB) const
bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved store into a stN intrinsic.
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool preferSelectsOverBooleanArithmetic(EVT VT) const override
Should we prefer selects to doing arithmetic on boolean types.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context, EVT VT) const override
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool supportPtrAuthBundles() const override
Return true if the target supports ptrauth operand bundles.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask, IntrinsicInst *DI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned getVectorIdxWidth(const DataLayout &DL) const override
Returns the type to be used for the index operand vector operations.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB, MachineOperand &IntDiscOp, MachineOperand &AddrDiscOp, const TargetRegisterClass *AddrDiscRC) const
Replace (0, vreg) discriminator components with the operands of blend or with (immediate,...
bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved load into a ldN intrinsic.
bool fallBackToDAGISel(const Instruction &Inst) const override
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
bool isLegalAddScalableImmediate(int64_t) const override
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
MachineBasicBlock * EmitCheckMatchingVL(MachineInstr &MI, MachineBasicBlock *MBB) const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
MachineBasicBlock * EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, bool Op0IsDef) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
MachineBasicBlock * EmitEntryPStateSM(MachineInstr &MI, MachineBasicBlock *BB) const
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override
In AArch64, true if FEAT_CPA is present.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
MachineBasicBlock * EmitAllocateSMESaveBuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
MachineBasicBlock * EmitAllocateZABuffer(MachineInstr &MI, MachineBasicBlock *BB) const
const AArch64TargetMachine & getTM() const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool isOpSuitableForLDPSTP(const Instruction *I) const
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
MachineBasicBlock * EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const
bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached to a strict FP call.
bool lowerInterleaveIntrinsicToStore(Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const override
Lower an interleave intrinsic to a target specific store intrinsic.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
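As background, the AArch64 load/store forms this hook models are roughly: base register plus an unsigned 12-bit offset scaled by the access size, base plus a signed 9-bit unscaled offset, or base plus an (optionally scaled) index register. A simplified standalone model, with hypothetical names and none of the real method's SVE or vector handling:

#include <cstdint>

// Simplified model of AArch64 load/store addressing:
//  - [base, #imm]  where imm is uimm12 * AccessBytes  (e.g. LDR x0, [x1, #8])
//  - [base, #imm]  where imm is a signed 9-bit offset (LDUR form)
//  - [base, index] with no immediate (register-offset form)
static bool isLegalAddrModeSketch(bool HasBase, int64_t Offset, bool HasIndex,
                                  uint64_t AccessBytes) { // AccessBytes > 0
  if (!HasBase)
    return false;
  if (HasIndex)
    return Offset == 0; // reg+reg forms take no immediate
  if (Offset >= 0 && Offset % (int64_t)AccessBytes == 0 &&
      (uint64_t)Offset / AccessBytes < 4096)
    return true;                          // scaled unsigned 12-bit form
  return Offset >= -256 && Offset < 256;  // unscaled signed 9-bit form
}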
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits to the DAG Combiner.
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
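The pattern both hooks protect is a shift-then-mask with a contiguous mask, which AArch64 compilers typically match to a single UBFX bitfield extract; commuting the shift through the surrounding operation would destroy that match. A worked instance:

#include <cstdint>

// (X >> C) & Mask with a contiguous Mask is a single bitfield extract on
// AArch64; this typically compiles to: ubfx w0, w0, #3, #4.
static uint32_t extractField(uint32_t X) {
  return (X >> 3) & 0xF; // 4-bit field starting at bit 3
}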
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
MachineBasicBlock * EmitDynamicProbedAlloc(MachineInstr &MI, MachineBasicBlock *MBB) const
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in SelectionDAGBuilder.
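For reference, lane i of @llvm.get.active.lane.mask(%base, %n) is active while base + i < n (the intrinsic defines the addition as non-wrapping), which is the classic tail predicate of a vector loop. A scalar model:

#include <cstdint>
#include <vector>

// Scalar model of @llvm.get.active.lane.mask(base, n): lane i is active
// while base + i < n.
static std::vector<bool> activeLaneMask(uint64_t Base, uint64_t N,
                                        unsigned Lanes) {
  std::vector<bool> Mask(Lanes);
  for (unsigned I = 0; I < Lanes; ++I)
    Mask[I] = Base + I < N;
  return Mask;
}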
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
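The transform is plain distributivity with both constants pre-folded, so it is exact even under wrap-around; a quick check:

#include <cassert>
#include <cstdint>

int main() {
  // (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2), here with c1 = 3
  // and c2 = 5: distributivity holds modulo 2^32, so wrapping is safe.
  for (uint32_t X : {0u, 1u, 12345u, 0xFFFFFFFFu})
    assert((X + 3u) * 5u == X * 5u + 15u);
}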
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Definition ArrayRef.h:40
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
An instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
CCState - This class holds information needed while lowering arguments and return values.
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to calling a function.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string, and methods for querying it.
Definition DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal types or non-trivial lowering, but runs quickly.
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lowering a region of the function.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
MachineOperand class - Representation of each machine instruction operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recursion depth (how many expansions are needed).
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid.
virtual bool softPromoteHalfType() const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
TargetLowering(const TargetLowering &)=delete
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
Base class of all SIMD vector types.
A range adaptor for a pair of iterators.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
const uint64_t ReservedFPControlBits
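Taken together, reading or replacing the FPCR rounding-mode field (bits 23:22) is a mask-and-shift over these constants; a minimal sketch (helper names are illustrative):

#include <cstdint>

// FPCR rounding mode lives in bits 23:22 (RoundingBitsPos = 22,
// rmMask = 3, per the constants documented above).
static unsigned getRoundingMode(uint64_t FPCR) {
  return (FPCR >> 22) & 3;
}

// Install a new rounding mode while preserving every other FPCR bit.
static uint64_t setRoundingMode(uint64_t FPCR, unsigned RM) {
  return (FPCR & ~(3ull << 22)) | ((uint64_t)(RM & 3) << 22);
}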
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition CallingConv.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:807
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
This is an optimization pass for GlobalISel generic memory operations.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
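As an illustration of the contract (returning false means the value was assigned), a toy CCAssignFn might fill a fixed GPR pool and then fall back to 8-byte stack slots. The register list and slot size below are illustrative, not any real convention's rules; real assign functions are generated from CallingConv.td, and this sketch assumes the AArch64 register enums are in scope:

#include "llvm/CodeGen/CallingConvLower.h"
using namespace llvm;

// Toy convention: first arguments in X0..X7, the rest in 8-byte stack
// slots. ArgFlags and OrigTy are unused in this toy.
static bool CC_Toy(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   Type *OrigTy, CCState &State) {
  static const MCPhysReg GPRs[] = {AArch64::X0, AArch64::X1, AArch64::X2,
                                   AArch64::X3, AArch64::X4, AArch64::X5,
                                   AArch64::X6, AArch64::X7};
  if (MCRegister Reg = State.AllocateReg(GPRs)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false; // assigned to a register
  }
  int64_t Offset = State.AllocateStack(8, Align(8));
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false; // assigned to the stack
}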
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
AtomicOrdering
Atomic ordering for LLVM's memory model.
CombineLevel
Definition DAGCombine.h:15
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition ValueTypes.h:35
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:174
These are IR-level optimization flags that may be propagated to SDNodes.