LLVM 23.0.0git
X86ISelLowering.h
Go to the documentation of this file.
1//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that X86 uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
15#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
16
17#include "X86SelectionDAGInfo.h"
20
21namespace llvm {
22 class X86Subtarget;
23 class X86TargetMachine;
24
25 namespace X86 {
26 /// Current rounding mode is represented in bits 11:10 of FPSR. These
27 /// values are same as corresponding constants for rounding mode used
28 /// in glibc.
30 rmInvalid = -1, // For handle Invalid rounding mode
31 rmToNearest = 0, // FE_TONEAREST
32 rmDownward = 1 << 10, // FE_DOWNWARD
33 rmUpward = 2 << 10, // FE_UPWARD
34 rmTowardZero = 3 << 10, // FE_TOWARDZERO
35 rmMask = 3 << 10 // Bit mask selecting rounding mode
36 };
37 }
38
39 /// Define some predicates that are used for node matching.
40 namespace X86 {
41 /// Returns true if Elt is a constant zero or floating point constant +0.0.
42 bool isZeroNode(SDValue Elt);
43
44 /// Returns true of the given offset can be
45 /// fit into displacement field of the instruction.
47 bool hasSymbolicDisplacement);
48
49 /// Determines whether the callee is required to pop its
50 /// own arguments. Callee pop is necessary to support tail calls.
51 bool isCalleePop(CallingConv::ID CallingConv,
52 bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
53
54 /// If Op is a constant whose elements are all the same constant or
55 /// undefined, return true and return the constant value in \p SplatVal.
56 /// If we have undef bits that don't cover an entire element, we treat these
57 /// as zero if AllowPartialUndefs is set, else we fail and return false.
58 bool isConstantSplat(SDValue Op, APInt &SplatVal,
59 bool AllowPartialUndefs = true);
60
61 /// Check if Op is a load operation that could be folded into some other x86
62 /// instruction as a memory operand. Example: vpaddd (%rdi), %xmm0, %xmm0.
63 bool mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
64 bool AssumeSingleUse = false,
65 bool IgnoreAlignment = false);
66
67 /// Check if Op is a load operation that could be folded into a vector splat
68 /// instruction as a memory operand. Example: vbroadcastss 16(%rdi), %xmm2.
70 const X86Subtarget &Subtarget,
71 bool AssumeSingleUse = false);
72
73 /// Check if Op is a value that could be used to fold a store into some
74 /// other x86 instruction as a memory operand. Ex: pextrb $0, %xmm0, (%rdi).
76
77 /// Check if Op is an operation that could be folded into a zero extend x86
78 /// instruction.
80
81 /// True if the target supports the extended frame for async Swift
82 /// functions.
83 bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
84 const MachineFunction &MF);
85
86 /// Convert LLVM rounding mode to X86 rounding mode.
87 int getRoundingModeX86(unsigned RM);
88
89 } // end namespace X86
90
91 //===--------------------------------------------------------------------===//
92 // X86 Implementation of the TargetLowering interface
93 class X86TargetLowering final : public TargetLowering {
94 // Copying needed for an outgoing byval argument.
95 enum ByValCopyKind {
96 // Argument is already in the correct location, no copy needed.
97 NoCopy,
98 // Argument value is currently in the local stack frame, needs copying to
99 // outgoing arguemnt area.
100 CopyOnce,
101 // Argument value is currently in the outgoing argument area, but not at
102 // the correct offset, so needs copying via a temporary in local stack
103 // space.
104 CopyViaTemp,
105 };
106
107 public:
108 explicit X86TargetLowering(const X86TargetMachine &TM,
109 const X86Subtarget &STI);
110
111 unsigned getJumpTableEncoding() const override;
112 bool useSoftFloat() const override;
113
114 void markLibCallAttributes(MachineFunction *MF, unsigned CC,
115 ArgListTy &Args) const override;
116
    /// Shift amounts are always i8: x86 variable-count shift instructions
    /// take the shift count in the 8-bit CL register regardless of the
    /// width of the value being shifted.
    MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override {
      return MVT::i8;
    }
120
121 const MCExpr *
123 const MachineBasicBlock *MBB, unsigned uid,
124 MCContext &Ctx) const override;
125
126 /// Returns relocation base for the given PIC jumptable.
128 SelectionDAG &DAG) const override;
129 const MCExpr *
131 unsigned JTI, MCContext &Ctx) const override;
132
133 /// Return the desired alignment for ByVal aggregate
134 /// function arguments in the caller parameter area. For X86, aggregates
135 /// that contains are placed at 16-byte boundaries while the rest are at
136 /// 4-byte boundaries.
137 Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override;
138
139 EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
140 const AttributeList &FuncAttributes) const override;
141
142 /// Returns true if it's safe to use load / store of the
143 /// specified type to expand memcpy / memset inline. This is mostly true
144 /// for all types except for some special cases. For example, on X86
145 /// targets without SSE2 f64 load / store are done with fldl / fstpl which
146 /// also does type conversion. Note the specified type doesn't have to be
147 /// legal as the hook is used before type legalization.
148 bool isSafeMemOpType(MVT VT) const override;
149
150 bool isMemoryAccessFast(EVT VT, Align Alignment) const;
151
152 /// Returns true if the target allows unaligned memory accesses of the
153 /// specified type. Returns whether it is "fast" in the last argument.
154 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
156 unsigned *Fast) const override;
157
158 /// This function returns true if the memory access is aligned or if the
159 /// target allows this specific unaligned memory access. If the access is
160 /// allowed, the optional final parameter returns a relative speed of the
161 /// access (as defined by the target).
163 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
164 Align Alignment,
166 unsigned *Fast = nullptr) const override;
167
    /// Convenience overload: checks whether the access described by \p MMO
    /// is allowed, forwarding the operand's address space, alignment and
    /// flags to the primary allowsMemoryAccess() implementation.
    bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                            const MachineMemOperand &MMO,
                            unsigned *Fast) const {
      return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
                                MMO.getAlign(), MMO.getFlags(), Fast);
    }
174
175 /// Provide custom lowering hooks for some operations.
176 ///
177 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
178
179 /// Replace the results of node with an illegal result
180 /// type with new values built out of custom code.
181 ///
183 SelectionDAG &DAG) const override;
184
185 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
186
187 bool preferABDSToABSWithNSW(EVT VT) const override;
188
189 bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
190 EVT ExtVT) const override;
191
193 EVT VT) const override;
194
195 /// Return true if the target has native support for
196 /// the specified value type and it is 'desirable' to use the type for the
197 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
198 /// instruction encodings are longer and some i16 instructions are slow.
199 bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
200
201 /// Return true if the target has native support for the
202 /// specified value type and it is 'desirable' to use the type. e.g. On x86
203 /// i16 is legal, but undesirable since i16 instruction encodings are longer
204 /// and some i16 instructions are slow.
205 bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
206
207 /// Return prefered fold type, Abs if this is a vector, AddAnd if its an
208 /// integer, None otherwise.
211 const SDNode *SETCC0,
212 const SDNode *SETCC1) const override;
213
214 /// Return the newly negated expression if the cost is not expensive and
215 /// set the cost in \p Cost to indicate that if it is cheaper or neutral to
216 /// do the negation.
218 bool LegalOperations, bool ForCodeSize,
220 unsigned Depth) const override;
221
224 MachineBasicBlock *MBB) const override;
225
    /// Do not merge vector stores after legalization because that may conflict
    /// with x86-specific store splitting optimizations. Scalar stores may
    /// still be merged.
    bool mergeStoresAfterLegalization(EVT MemVT) const override {
      return !MemVT.isVector();
    }
231
232 bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
233 const MachineFunction &MF) const override;
234
235 bool isCheapToSpeculateCttz(Type *Ty) const override;
236
237 bool isCheapToSpeculateCtlz(Type *Ty) const override;
238
239 bool isCtlzFast() const override;
240
241 bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
242 // If the pair to store is a mixture of float and int values, we will
243 // save two bitwise instructions and one float-to-int instruction and
244 // increase one store instruction. There is potentially a more
245 // significant benefit because it avoids the float->int domain switch
246 // for input value. So It is more likely a win.
247 if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
248 (LTy.isInteger() && HTy.isFloatingPoint()))
249 return true;
250 // If the pair only contains int values, we will save two bitwise
251 // instructions and increase one store instruction (costing one more
252 // store buffer). Since the benefit is more blurred so we leave
253 // such pair out until we get testcase to prove it is a win.
254 return false;
255 }
256
257 bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
258
259 bool hasAndNotCompare(SDValue Y) const override;
260
261 bool hasAndNot(SDValue Y) const override;
262
263 bool hasBitTest(SDValue X, SDValue Y) const override;
264
267 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
268 SelectionDAG &DAG) const override;
269
271 EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
272 const APInt &ShiftOrRotateAmt,
273 const std::optional<APInt> &AndMask) const override;
274
275 bool preferScalarizeSplat(SDNode *N) const override;
276
277 CondMergingParams
279 const Value *Rhs) const override;
280
281 bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;
282
283 bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;
284
285 bool
287 unsigned KeptBits) const override {
288 // For vectors, we don't have a preference..
289 if (XVT.isVector())
290 return false;
291
292 auto VTIsOk = [](EVT VT) -> bool {
293 return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
294 VT == MVT::i64;
295 };
296
297 // We are ok with KeptBitsVT being byte/word/dword, what MOVS supports.
298 // XVT will be larger than KeptBitsVT.
299 MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
300 return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
301 }
302
305 unsigned ExpansionFactor) const override;
306
307 bool shouldSplatInsEltVarIndex(EVT VT) const override;
308
309 bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override {
310 // Converting to sat variants holds little benefit on X86 as we will just
311 // need to saturate the value back using fp arithmatic.
313 }
314
    /// Combining logic of two setcc results into bitwise logic is only
    /// profitable for scalar integer types.
    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }
318
319 /// Vector-sized comparisons are fast using PCMPEQ + PMOVMSK or PTEST.
320 MVT hasFastEqualityCompare(unsigned NumBits) const override;
321
322 /// Return the value type to use for ISD::SETCC.
324 EVT VT) const override;
325
327 const APInt &DemandedElts,
328 TargetLoweringOpt &TLO) const override;
329
330 /// Determine which of the bits specified in Mask are known to be either
331 /// zero or one and return them in the KnownZero/KnownOne bitsets.
333 KnownBits &Known,
334 const APInt &DemandedElts,
335 const SelectionDAG &DAG,
336 unsigned Depth = 0) const override;
337
338 /// Determine the number of bits in the operation that are sign bits.
340 const APInt &DemandedElts,
341 const SelectionDAG &DAG,
342 unsigned Depth) const override;
343
345 const APInt &DemandedElts,
346 APInt &KnownUndef,
347 APInt &KnownZero,
348 TargetLoweringOpt &TLO,
349 unsigned Depth) const override;
350
352 const APInt &DemandedElts,
353 unsigned MaskIndex,
354 TargetLoweringOpt &TLO,
355 unsigned Depth) const;
356
358 const APInt &DemandedBits,
359 const APInt &DemandedElts,
360 KnownBits &Known,
361 TargetLoweringOpt &TLO,
362 unsigned Depth) const override;
363
365 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
366 SelectionDAG &DAG, unsigned Depth) const override;
367
369 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
370 UndefPoisonKind Kind, unsigned Depth) const override;
371
373 const APInt &DemandedElts,
374 const SelectionDAG &DAG,
375 UndefPoisonKind Kind,
376 bool ConsiderFlags,
377 unsigned Depth) const override;
378
379 bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
380 APInt &UndefElts, const SelectionDAG &DAG,
381 unsigned Depth) const override;
382
384 // Peek through bitcasts/extracts/inserts to see if we have a vector
385 // load/broadcast from memory.
386 while (Op.getOpcode() == ISD::BITCAST ||
387 Op.getOpcode() == ISD::EXTRACT_SUBVECTOR ||
388 (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
389 Op.getOperand(0).isUndef()))
390 Op = Op.getOperand(Op.getOpcode() == ISD::INSERT_SUBVECTOR ? 1 : 0);
391
392 return Op.getOpcode() == X86ISD::VBROADCAST_LOAD ||
393 Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
394 (Op.getOpcode() == ISD::LOAD &&
397 }
398
399 bool isTargetCanonicalSelect(SDNode *N) const override;
400
401 const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
402
403 SDValue unwrapAddress(SDValue N) const override;
404
406
407 ConstraintType getConstraintType(StringRef Constraint) const override;
408
409 /// Examine constraint string and operand type and determine a weight value.
410 /// The operand object must already have been set up with the operand type.
412 getSingleConstraintMatchWeight(AsmOperandInfo &Info,
413 const char *Constraint) const override;
414
415 const char *LowerXConstraint(EVT ConstraintVT) const override;
416
417 /// Lower the specified operand into the Ops vector. If it is invalid, don't
418 /// add anything to Ops. If hasMemory is true it means one of the asm
419 /// constraint of the inline asm instruction being processed is 'm'.
421 std::vector<SDValue> &Ops,
422 SelectionDAG &DAG) const override;
423
425 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
426 if (ConstraintCode == "v")
428 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
429 }
430
431 /// Handle Lowering flag assembly outputs.
433 const SDLoc &DL,
434 const AsmOperandInfo &Constraint,
435 SelectionDAG &DAG) const override;
436
437 /// Given a physical register constraint
438 /// (e.g. {edx}), return the register number and the register class for the
439 /// register. This should only be used for C_Register constraints. On
440 /// error, this returns a register number of 0.
441 std::pair<unsigned, const TargetRegisterClass *>
443 StringRef Constraint, MVT VT) const override;
444
445 /// Return true if the addressing mode represented
446 /// by AM is legal for this target, for a load/store of the specified type.
447 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
448 Type *Ty, unsigned AS,
449 Instruction *I = nullptr) const override;
450
451 bool addressingModeSupportsTLS(const GlobalValue &GV) const override;
452
453 /// Return true if the specified immediate is legal
454 /// icmp immediate, that is the target has icmp instructions which can
455 /// compare a register against the immediate without having to materialize
456 /// the immediate into a register.
457 bool isLegalICmpImmediate(int64_t Imm) const override;
458
459 /// Return true if the specified immediate is legal
460 /// add immediate, that is the target has add instructions which can
461 /// add a register and the immediate without having to materialize
462 /// the immediate into a register.
463 bool isLegalAddImmediate(int64_t Imm) const override;
464
465 bool isLegalStoreImmediate(int64_t Imm) const override;
466
467 /// Add x86-specific opcodes to the default list.
468 bool isBinOp(unsigned Opcode) const override;
469
470 /// Returns true if the opcode is a commutative binary operation.
471 bool isCommutativeBinOp(unsigned Opcode) const override;
472
473 /// Return true if it's free to truncate a value of
474 /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
475 /// register EAX to i16 by referencing its sub-register AX.
476 bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
477 bool isTruncateFree(EVT VT1, EVT VT2) const override;
478
479 bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
480
481 /// Return true if any actual instruction that defines a
482 /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
483 /// register. This does not necessarily include registers defined in
484 /// unknown ways, such as incoming arguments, or copies from unknown
485 /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
486 /// does not necessarily apply to truncate instructions. e.g. on x86-64,
487 /// all instructions that define 32-bit values implicit zero-extend the
488 /// result out to 64 bits.
489 bool isZExtFree(Type *Ty1, Type *Ty2) const override;
490 bool isZExtFree(EVT VT1, EVT VT2) const override;
491 bool isZExtFree(SDValue Val, EVT VT2) const override;
492
493 bool shouldConvertPhiType(Type *From, Type *To) const override;
494
495 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
496 /// extend node) is profitable.
497 bool isVectorLoadExtDesirable(SDValue) const override;
498
499 /// Return true if an FMA operation is faster than a pair of fmul and fadd
500 /// instructions. fmuladd intrinsics will be expanded to FMAs when this
501 /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
503 EVT VT) const override;
504
505 /// Return true if it's profitable to narrow operations of type SrcVT to
506 /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not
507 /// from i32 to i16.
508 bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override;
509
510 bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
511 unsigned SelectOpcode, SDValue X,
512 SDValue Y) const override;
513
514 /// Given an intrinsic, checks if on the target the intrinsic will need to
515 /// map to a MemIntrinsicNode (touches memory). If this is the case, it
516 /// returns true and stores the intrinsic information into the IntrinsicInfo
517 /// that was passed to the function.
519 const CallBase &I, MachineFunction &MF,
520 unsigned Intrinsic) const override;
521
522 /// Returns true if the target can instruction select the
523 /// specified FP immediate natively. If false, the legalizer will
524 /// materialize the FP immediate as a load from a constant pool.
525 bool isFPImmLegal(const APFloat &Imm, EVT VT,
526 bool ForCodeSize) const override;
527
528 /// Targets can use this to indicate that they only support *some*
529 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
530 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
531 /// be legal.
532 bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
533
534 /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
535 /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
536 /// constant pool entry.
537 bool isVectorClearMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
538
539 /// Returns true if lowering to a jump table is allowed.
540 bool areJTsAllowed(const Function *Fn) const override;
541
543 EVT ConditionVT) const override;
544
545 /// If true, then instruction selection should
546 /// seek to shrink the FP constant of the specified type to a smaller type
547 /// in order to save space and / or reduce runtime.
548 bool ShouldShrinkFPConstant(EVT VT) const override;
549
550 /// Return true if we believe it is correct and profitable to reduce the
551 /// load node to a smaller type.
552 bool
554 std::optional<unsigned> ByteOffset) const override;
555
556 /// Return true if the specified scalar FP type is computed in an SSE
557 /// register, not on the X87 floating point stack.
558 bool isScalarFPTypeInSSEReg(EVT VT) const;
559
560 /// Returns true if it is beneficial to convert a load of a constant
561 /// to just the constant itself.
563 Type *Ty) const override;
564
565 bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const override;
566
567 bool convertSelectOfConstantsToMath(EVT VT) const override;
568
569 bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
570 SDValue C) const override;
571
572 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
573 /// with this index.
574 bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
575 unsigned Index) const override;
576
577 /// Scalar ops always have equal or better analysis/performance/power than
578 /// the vector equivalent, so this always makes sense if the scalar op is
579 /// supported.
580 bool shouldScalarizeBinop(SDValue) const override;
581
582 /// Extract of a scalar FP value from index 0 of a vector is free.
583 bool isExtractVecEltCheap(EVT VT, unsigned Index) const override {
584 EVT EltVT = VT.getScalarType();
585 return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0;
586 }
587
588 /// Overflow nodes should get combined/lowered to optimal instructions
589 /// (they should allow eliminating explicit compares by getting flags from
590 /// math ops).
591 bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
592 bool MathUsed) const override;
593
594 bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem,
595 unsigned AddrSpace) const override {
596 // If we can replace more than 2 scalar stores, there will be a reduction
597 // in instructions even after we add a vector constant load.
598 return IsZero || NumElem > 2;
599 }
600
601 bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
602 const SelectionDAG &DAG,
603 const MachineMemOperand &MMO) const override;
604
    /// X86 has instructions that correspond to cmp + select, so forming
    /// minnum/maxnum from that pattern is not profitable.
    bool isProfitableToCombineMinNumMaxNum(EVT VT) const override {
      return false;
    }
610
611 Register getRegisterByName(const char* RegName, LLT VT,
612 const MachineFunction &MF) const override;
613
614 /// If a physical register, this returns the register that receives the
615 /// exception address on entry to an EH pad.
617 getExceptionPointerRegister(const Constant *PersonalityFn) const override;
618
619 /// If a physical register, this returns the register that receives the
620 /// exception typeid on entry to a landing pad.
622 getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
623
624 bool needsFixedCatchObjects() const override;
625
626 /// This method returns a target specific FastISel object,
627 /// or null if the target does not support "fast" ISel.
628 FastISel *
630 const TargetLibraryInfo *libInfo,
631 const LibcallLoweringInfo *libcallLowering) const override;
632
633 /// If the target has a standard location for the stack protector cookie,
634 /// returns the address of that location. Otherwise, returns nullptr.
636 const LibcallLoweringInfo &Libcalls) const override;
637
638 bool useLoadStackGuardNode(const Module &M) const override;
639 bool useStackGuardXorFP() const override;
640 void
642 const LibcallLoweringInfo &Libcalls) const override;
644 const SDLoc &DL) const override;
645
646
647 /// Return true if the target stores SafeStack pointer at a fixed offset in
648 /// some non-standard address space, and populates the address space and
649 /// offset as appropriate.
651 IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override;
652
653 std::pair<SDValue, SDValue> BuildFILD(EVT DstVT, EVT SrcVT, const SDLoc &DL,
654 SDValue Chain, SDValue Pointer,
655 MachinePointerInfo PtrInfo,
656 Align Alignment,
657 SelectionDAG &DAG) const;
658
659 /// Customize the preferred legalization strategy for certain types.
660 LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
661
663 EVT VT) const override;
664
667 EVT VT) const override;
668
670 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
671 unsigned &NumIntermediates, MVT &RegisterVT) const override;
672
674 Type *Ty, CallingConv::ID CallConv, bool isVarArg,
675 const DataLayout &DL) const override;
676
677 bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
678
679 bool supportSwiftError() const override;
680
    /// KCFI (kernel control-flow integrity) operand bundles are supported.
    bool supportKCFIBundles() const override { return true; }
682
685 const TargetInstrInfo *TII) const override;
686
687 bool hasStackProbeSymbol(const MachineFunction &MF) const override;
688 bool hasInlineStackProbe(const MachineFunction &MF) const override;
689 StringRef getStackProbeSymbolName(const MachineFunction &MF) const override;
690
691 unsigned getStackProbeSize(const MachineFunction &MF) const;
692
    /// X86 provides vector blend operations.
    bool hasVectorBlend() const override { return true; }
694
    /// Interleaved memory access lowering supports factors up to 4.
    unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
696
698 unsigned OpNo) const override;
699
701 MachineMemOperand *MMO, SDValue &NewLoad,
702 SDValue Ptr, SDValue PassThru,
703 SDValue Mask) const override;
705 MachineMemOperand *MMO, SDValue Ptr, SDValue Val,
706 SDValue Mask) const override;
707
708 /// Lower interleaved load(s) into target specific
709 /// instructions/intrinsics.
710 bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
712 ArrayRef<unsigned> Indices, unsigned Factor,
713 const APInt &GapMask) const override;
714
715 /// Lower interleaved store(s) into target specific
716 /// instructions/intrinsics.
717 bool lowerInterleavedStore(Instruction *Store, Value *Mask,
718 ShuffleVectorInst *SVI, unsigned Factor,
719 const APInt &GapMask) const override;
720
722 int JTI, SelectionDAG &DAG) const override;
723
724 Align getPrefLoopAlignment(MachineLoop *ML) const override;
725
726 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const override {
727 if (VT == MVT::f80)
728 return EVT::getIntegerVT(Context, 96);
730 }
731
732 protected:
733 std::pair<const TargetRegisterClass *, uint8_t>
735 MVT VT) const override;
736
737 private:
738 /// Keep a reference to the X86Subtarget around so that we can
739 /// make the right decision when generating code for different targets.
740 const X86Subtarget &Subtarget;
741
742 /// A list of legal FP immediates.
743 std::vector<APFloat> LegalFPImmediates;
744
    /// Indicate that this x86 target can instruction
    /// select the specified FP immediate natively, by appending it to the
    /// LegalFPImmediates list consulted by isFPImmLegal().
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
750
752 CallingConv::ID CallConv, bool isVarArg,
753 const SmallVectorImpl<ISD::InputArg> &Ins,
754 const SDLoc &dl, SelectionDAG &DAG,
755 SmallVectorImpl<SDValue> &InVals,
756 uint32_t *RegMask) const;
757 SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
758 const SmallVectorImpl<ISD::InputArg> &ArgInfo,
759 const SDLoc &dl, SelectionDAG &DAG,
760 const CCValAssign &VA, MachineFrameInfo &MFI,
761 unsigned i) const;
762 SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
763 const SDLoc &dl, SelectionDAG &DAG,
764 const CCValAssign &VA,
765 ISD::ArgFlagsTy Flags, bool isByval) const;
766
767 // Call lowering helpers.
768
769 /// Check whether the call is eligible for sibling call optimization.
770 bool
771 isEligibleForSiblingCallOpt(TargetLowering::CallLoweringInfo &CLI,
772 CCState &CCInfo,
773 SmallVectorImpl<CCValAssign> &ArgLocs) const;
774 SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
775 SDValue Chain, bool IsTailCall,
776 bool Is64Bit, int FPDiff,
777 const SDLoc &dl) const;
778
779 unsigned GetAlignedArgumentStackSize(unsigned StackSize,
780 SelectionDAG &DAG) const;
781
782 unsigned getAddressSpace() const;
783
784 SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned,
785 SDValue &Chain) const;
786 SDValue LRINT_LLRINTHelper(SDNode *N, SelectionDAG &DAG) const;
787
788 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
789 SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
790 SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
791 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
792
793 unsigned getGlobalWrapperKind(const GlobalValue *GV,
794 const unsigned char OpFlags) const;
795 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
796 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
797 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
798 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
799 SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
800
801 /// Creates target global address or external symbol nodes for calls or
802 /// other uses.
803 SDValue LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG, bool ForCall,
804 bool *IsImpCall) const;
805
806 SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
807 SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
808 SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
809 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
810 SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
811 SDValue LowerLRINT_LLRINT(SDValue Op, SelectionDAG &DAG) const;
812 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
813 SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
814 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
815 SDValue LowerConditionalBranch(SDValue Op, SelectionDAG &DAG) const;
816 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
817 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
818 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
819 SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
820 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
821 SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
822 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
823 SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
824 ByValCopyKind ByValNeedsCopyForTailCall(SelectionDAG &DAG, SDValue Src,
825 SDValue Dst,
826 ISD::ArgFlagsTy Flags) const;
827 SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
828 SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
829 SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
830 SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
831 SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
832 SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
833 SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
834 SDValue LowerGET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const;
835 SDValue LowerSET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const;
836 SDValue LowerRESET_FPENV(SDValue Op, SelectionDAG &DAG) const;
837 SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
838 SDValue LowerWin64_FP_TO_INT128(SDValue Op, SelectionDAG &DAG,
839 SDValue &Chain) const;
840 SDValue LowerWin64_INT128_TO_FP(SDValue Op, SelectionDAG &DAG) const;
841 SDValue LowerGC_TRANSITION(SDValue Op, SelectionDAG &DAG) const;
842 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
843 SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const;
844 SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
845 SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
846 SDValue LowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
847
848 SDValue
849 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
850 const SmallVectorImpl<ISD::InputArg> &Ins,
851 const SDLoc &dl, SelectionDAG &DAG,
852 SmallVectorImpl<SDValue> &InVals) const override;
853 SDValue LowerCall(CallLoweringInfo &CLI,
854 SmallVectorImpl<SDValue> &InVals) const override;
855
856 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
857 const SmallVectorImpl<ISD::OutputArg> &Outs,
858 const SmallVectorImpl<SDValue> &OutVals,
859 const SDLoc &dl, SelectionDAG &DAG) const override;
860
// Split CSR (callee-saved register) handling is enabled only for functions
// using the CXX_FAST_TLS calling convention that are also marked nounwind;
// all other functions use the normal prologue/epilogue save/restore path.
861 bool supportSplitCSR(MachineFunction *MF) const override {
862 return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
863 MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
864 }
865 void initializeSplitCSR(MachineBasicBlock *Entry) const override;
866 void insertCopiesSplitCSR(
867 MachineBasicBlock *Entry,
868 const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
869
870 bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
871
872 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
873
874 EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
875 ISD::NodeType ExtendKind) const override;
876
877 bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
878 bool isVarArg,
879 const SmallVectorImpl<ISD::OutputArg> &Outs,
880 LLVMContext &Context,
881 const Type *RetTy) const override;
882
883 const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
885
887 shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
888
890 shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
892 shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override;
894 shouldExpandLogicAtomicRMWInIR(const AtomicRMWInst *AI) const;
896 shouldCastAtomicLoadInIR(LoadInst *LI) const override;
897 void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const override;
898 void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const override;
899
900 LoadInst *
901 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
902
// X86 never issues a separate atomic load inside an atomic-emulation loop.
// NOTE(review): presumably the cmpxchg at the heart of the loop already
// performs the required atomic read — confirm against the base-class hook docs.
903 bool shouldIssueAtomicLoadForAtomicEmulationLoop() const override {
904 return false;
905 }
906
907 bool needsCmpXchgNb(Type *MemType) const;
908
909 void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
910 MachineBasicBlock *DispatchBB, int FI) const;
911
912 // Utility function to emit the low-level va_arg code for X86-64.
913 MachineBasicBlock *
914 EmitVAARGWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
915
916 /// Utility function to lower a pair of adjacent select (CMOV) pseudo
916 /// instructions as one cascaded select. (NOTE: the previous comment here
916 /// described the xmm-reg-save portion of va_start, which does not match
916 /// this function.)
917 MachineBasicBlock *EmitLoweredCascadedSelect(MachineInstr &MI1,
918 MachineInstr &MI2,
919 MachineBasicBlock *BB) const;
920
921 MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
922 MachineBasicBlock *BB) const;
923
924 MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
925 MachineBasicBlock *BB) const;
926
927 MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
928 MachineBasicBlock *BB) const;
929
930 MachineBasicBlock *EmitLoweredProbedAlloca(MachineInstr &MI,
931 MachineBasicBlock *BB) const;
932
933 MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
934 MachineBasicBlock *BB) const;
935
936 MachineBasicBlock *EmitLoweredIndirectThunk(MachineInstr &MI,
937 MachineBasicBlock *BB) const;
938
939 MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
940 MachineBasicBlock *MBB) const;
941
942 void emitSetJmpShadowStackFix(MachineInstr &MI,
943 MachineBasicBlock *MBB) const;
944
945 MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
946 MachineBasicBlock *MBB) const;
947
948 MachineBasicBlock *emitLongJmpShadowStackFix(MachineInstr &MI,
949 MachineBasicBlock *MBB) const;
950
951 MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
952 MachineBasicBlock *MBB) const;
953
954 MachineBasicBlock *emitPatchableEventCall(MachineInstr &MI,
955 MachineBasicBlock *MBB) const;
956
957 /// Emit flags for the given setcc condition and operands. Also returns the
958 /// corresponding X86 condition code constant in X86CC.
959 SDValue emitFlagsForSetcc(SDValue Op0, SDValue Op1, ISD::CondCode CC,
960 const SDLoc &dl, SelectionDAG &DAG,
961 SDValue &X86CC) const;
962
963 bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
964 SDValue IntPow2) const override;
965
966 /// Check if replacement of SQRT with RSQRT should be disabled.
967 bool isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const override;
968
969 /// Use rsqrt* to speed up sqrt calculations.
970 SDValue getSqrtEstimate(SDValue Op, SelectionDAG &DAG, int Enabled,
971 int &RefinementSteps, bool &UseOneConstNR,
972 bool Reciprocal) const override;
973
974 /// Use rcp* to speed up fdiv calculations.
975 SDValue getRecipEstimate(SDValue Op, SelectionDAG &DAG, int Enabled,
976 int &RefinementSteps) const override;
977
978 /// Reassociate floating point divisions into multiply by reciprocal.
979 unsigned combineRepeatedFPDivisors() const override;
980
981 SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
982 SmallVectorImpl<SDNode *> &Created) const override;
983
984 SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
985 SDValue V2) const;
986 };
987
988 namespace X86 {
989 FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
990 const TargetLibraryInfo *libInfo,
991 const LibcallLoweringInfo *libcallLowering);
992 } // end namespace X86
993
994 // X86 specific Gather/Scatter nodes.
995 // The class has the same order of operands as MaskedGatherScatterSDNode for
996 // convenience.
998 public:
999 // This is intended as a utility class and should never be created directly.
1002
// Operand accessors. Per the class comment above, the operand order matches
// MaskedGatherScatterSDNode for convenience: 2 = mask, 3 = base pointer,
// 4 = index vector, 5 = scale.
1003 const SDValue &getBasePtr() const { return getOperand(3); }
1004 const SDValue &getIndex() const { return getOperand(4); }
1005 const SDValue &getMask() const { return getOperand(2); }
1006 const SDValue &getScale() const { return getOperand(5); }
1007
// LLVM-style RTTI: this shared base matches both the X86 gather and the
// X86 scatter target nodes.
1008 static bool classof(const SDNode *N) {
1009 return N->getOpcode() == X86ISD::MGATHER ||
1010 N->getOpcode() == X86ISD::MSCATTER;
1011 }
1012 };
1013
1015 public:
// Pass-through value (operand 1): supplies the result elements for lanes
// whose mask bit is clear.
1016 const SDValue &getPassThru() const { return getOperand(1); }
1017
// LLVM-style RTTI: matches only the X86 gather node.
1018 static bool classof(const SDNode *N) {
1019 return N->getOpcode() == X86ISD::MGATHER;
1020 }
1021 };
1022
1024 public:
// Value being scattered to memory (operand 1).
1025 const SDValue &getValue() const { return getOperand(1); }
1026
// LLVM-style RTTI: matches only the X86 scatter node.
1027 static bool classof(const SDNode *N) {
1028 return N->getOpcode() == X86ISD::MSCATTER;
1029 }
1030 };
1031
1032 /// Generate unpacklo/unpackhi shuffle mask.
1033 void createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask, bool Lo,
1034 bool Unary);
1035
1036 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
1037 /// imposed by AVX and specific to the unary pattern. Example:
1038 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
1039 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
1040 void createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask, bool Lo);
1041
1042} // end namespace llvm
1043
1044#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
return SDValue()
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls.
const SmallVectorImpl< MachineOperand > & Cond
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
This file describes how to lower LLVM code to machine code.
static bool is64Bit(const char *name)
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
Class for arbitrary precision integers.
Definition APInt.h:78
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Definition MCContext.h:83
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
Representation of each machine instruction.
A description of a memory reference used in the backend.
unsigned getAddrSpace() const
Flags
Flags values. These may be or'd together.
Flags getFlags() const
Return the raw flags of the source value,.
LLVM_ABI Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemoryVT, PointerUnion< MachineMemOperand *, MachineMemOperand ** > MemRefs)
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool shouldIssueAtomicLoadForAtomicEmulationLoop(void) const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
AndOrSETCCFoldKind
Enum of different potentially desirable ways to fold (and/or (setcc ...), (setcc ....
NegatibleCost
Enum that specifies when a float negation is beneficial.
std::vector< ArgListEntry > ArgListTy
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
TargetLowering(const TargetLowering &)=delete
virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM Value Representation.
Definition Value.h:75
const SDValue & getPassThru() const
static bool classof(const SDNode *N)
static bool classof(const SDNode *N)
const SDValue & getValue() const
static bool classof(const SDNode *N)
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Overflow nodes should get combined/lowered to optimal instructions (they should allow eliminating exp...
Align getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const override
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
bool preferABDSToABSWithNSW(EVT VT) const override
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
std::pair< SDValue, SDValue > BuildFILD(EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer, MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool isMemoryAccessFast(EVT VT, Align Alignment) const
SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, const SDLoc &DL, const AsmOperandInfo &Constraint, SelectionDAG &DAG) const override
Handle Lowering flag assembly outputs.
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const override
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth) const override
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
bool convertSelectOfConstantsToMath(EVT VT) const override
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint letter, return the type of constraint for this target.
bool hasVectorBlend() const override
Return true if the target has a vector blend instruction.
Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useSoftFloat() const override
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool isLegalStoreImmediate(int64_t Imm) const override
Return true if the specified immediate is legal for the value input of a store instruction.
SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const override
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize, NegatibleCost &Cost, unsigned Depth) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
Return true if the target stores SafeStack pointer at a fixed offset in some non-standard address spa...
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool isSafeMemOpType(MVT VT) const override
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AddrSpace) const override
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
Return the desired alignment for ByVal aggregate function arguments in the caller parameter area.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldSplatInsEltVarIndex(EVT VT) const override
Return true if inserting a scalar into a variable element of an undef vector is more efficiently hand...
bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const override
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
MVT hasFastEqualityCompare(unsigned NumBits) const override
Vector-sized comparisons are fast using PCMPEQ + PMOVMSK or PTEST.
bool SimplifyDemandedVectorEltsForTargetShuffle(SDValue Op, const APInt &DemandedElts, unsigned MaskIndex, TargetLoweringOpt &TLO, unsigned Depth) const
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
unsigned preferedOpcodeForCmpEqPiecesOfOperand(EVT VT, unsigned ShiftOpc, bool MayTransformRotate, const APInt &ShiftOrRotateAmt, const std::optional< APInt > &AndMask) const override
bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond, EVT VT) const override
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if we believe it is correct and profitable to reduce the load node to a smaller type.
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, const MachineMemOperand &MMO, unsigned *Fast) const
bool preferScalarizeSplat(SDNode *N) const override
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, UndefPoisonKind Kind, bool ConsiderFlags, unsigned Depth) const override
Return true if Op can create undef or poison from non-undef & non-poison operands.
bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const override
Lower interleaved store(s) into target specific instructions/intrinsics.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Returns true if the target allows unaligned memory accesses of the specified type.
bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool isProfitableToCombineMinNumMaxNum(EVT VT) const override
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const override
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const override
Lower interleaved load(s) into target specific instructions/intrinsics.
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const override
Return true if it is profitable to convert a select of FP constants into a constant pool load whose a...
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
StringRef getStackProbeSymbolName(const MachineFunction &MF) const override
Returns the name of the symbol used to emit stack probes or the empty string if not applicable.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
bool useStackGuardXorFP() const override
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
bool shouldScalarizeBinop(SDValue) const override
Scalar ops always have equal or better analysis/performance/power than the vector equivalent,...
void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const override
void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type Ty1 to type Ty2.
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool areJTsAllowed(const Function *Fn) const override
Returns true if lowering to a jump table is allowed.
bool isCommutativeBinOp(unsigned Opcode) const override
Returns true if the opcode is a commutative binary operation.
bool isScalarFPTypeInSSEReg(EVT VT) const
Return true if the specified scalar FP type is computed in an SSE register, not on the X87 floating p...
MVT getPreferredSwitchConditionType(LLVMContext &Context, EVT ConditionVT) const override
Returns preferred type for switch condition.
SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const override
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
bool isVectorClearMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Similar to isShuffleMaskLegal.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &Info, const char *Constraint) const override
Examine constraint string and operand type and determine a weight value.
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Customize the preferred legalization strategy for certain types.
bool shouldConvertPhiType(Type *From, Type *To) const override
Given a set in interconnected phis of type 'From' that are loaded/stored or bitcast to type 'To',...
bool hasStackProbeSymbol(const MachineFunction &MF) const override
Returns true if stack probing through a function call is requested.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type Ty1 implicit zero-extends the valu...
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
This function returns true if the memory access is aligned or if the target allows this specific unal...
bool isTargetCanonicalConstantNode(SDValue Op) const override
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const override
bool mergeStoresAfterLegalization(EVT MemVT) const override
Do not merge vector stores after legalization because that may conflict with x86-specific store split...
TargetLowering::AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const override
Return preferred fold type, Abs if this is a vector, AddAnd if it's an integer, None otherwise.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override
There are two ways to clear extreme bits (either low or high): Mask: x & (-1 << y) (the instcombine c...
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool addressingModeSupportsTLS(const GlobalValue &GV) const override
Returns true if the target's addressing mode can target thread local storage (TLS).
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const override
Expands target specific indirect branch for the case of JumpTable expansion.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isBinOp(unsigned Opcode) const override
Add x86-specific opcodes to the default list.
bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
SDValue unwrapAddress(SDValue N) const override
CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs) const override
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
X86TargetLowering(const X86TargetMachine &TM, const X86Subtarget &STI)
bool isTargetCanonicalSelect(SDNode *N) const override
Return true if the given select/vselect should be considered canonical and not be transformed.
bool isVectorLoadExtDesirable(SDValue) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const override
This method returns the constant pool value that will be loaded by LD.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const override
For types supported by the target, this is an identity function.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
unsigned getStackProbeSize(const MachineFunction &MF) const
bool ShouldShrinkFPConstant(EVT VT) const override
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
Replace the results of node with an illegal result type with new values built out of custom code.
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, UndefPoisonKind Kind, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, Kind can be used to track poison ...
bool needsFixedCatchObjects() const override
bool isExtractVecEltCheap(EVT VT, unsigned Index) const override
Extract of a scalar FP value from index 0 of a vector is free.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition ISDOpcodes.h:600
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:993
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Define some predicates that are used for node matching.
RoundingMode
Current rounding mode is represented in bits 11:10 of FPSR.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
bool mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT, const X86Subtarget &Subtarget, bool AssumeSingleUse=false)
Check if Op is a load operation that could be folded into a vector splat instruction as a memory oper...
bool isZeroNode(SDValue Elt)
Returns true if Elt is a constant zero or floating point constant +0.0.
bool mayFoldIntoZeroExtend(SDValue Op)
Check if Op is an operation that could be folded into a zero extend x86 instruction.
bool mayFoldIntoStore(SDValue Op)
Check if Op is a value that could be used to fold a store into some other x86 instruction as a memory...
bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget, const MachineFunction &MF)
True if the target supports the extended frame for async Swift functions.
int getRoundingModeX86(unsigned RM)
Convert LLVM rounding mode to X86 rounding mode.
bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg, bool GuaranteeTCO)
Determines whether the callee is required to pop its own arguments.
bool mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget, bool AssumeSingleUse=false, bool IgnoreAlignment=false)
Check if Op is a load operation that could be folded into some other x86 instruction as a memory oper...
bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, bool hasSymbolicDisplacement)
Returns true if the given offset can fit into the displacement field of the instruction.
bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs)
If Op is a constant whose elements are all the same constant or undefined, return true and return the...
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
InstructionCost Cost
void createUnpackShuffleMask(EVT VT, SmallVectorImpl< int > &Mask, bool Lo, bool Unary)
Generate unpacklo/unpackhi shuffle mask.
void createSplat2ShuffleMask(MVT VT, SmallVectorImpl< int > &Mask, bool Lo)
Similar to unpacklo/unpackhi, but without the 128-bit lane limitation imposed by AVX and specific to ...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
Definition UndefPoison.h:20
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:61
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:165
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
This class contains a discriminated union of information about pointers in memory operands,...