//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include <optional>
#include <utility>

namespace llvm {

  /// Define some predicates that are used for node matching.
  namespace PPC {

    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUDUM instruction.
    bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGEW or VMRGOW instruction.
    bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                             unsigned ShuffleKind, SelectionDAG &DAG);

    /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXSLDWI instruction.
    bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                              bool &Swap, bool IsLE);

    /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRH instruction.
    bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);

    /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRW instruction.
    bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);

    /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRD instruction.
    bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);

    /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRQ instruction.
    bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);

    /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXPERMDI instruction.
    bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                            SelectionDAG &DAG);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
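
    // For example, the v16i8 shuffle mask
    //   <3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3>
    // is a splat of byte element 3 that VSPLTB can implement (EltSize == 1
    // here); larger EltSize values correspond to VSPLTH/VSPLTW.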

    /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
    /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
    /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
    /// vector into the other. This function will also set a couple of
    /// output parameters for how much the source vector needs to be shifted and
    /// what byte number needs to be specified for the instruction to put the
    /// element in the desired location of the target vector.
    bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                         unsigned &InsertAtByte, bool &Swap, bool IsLE);

    /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
    /// appropriate for PPC mnemonics (which have a big endian bias - namely
    /// elements are counted from the left of the vector register).
    unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                        SelectionDAG &DAG);
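
    // For example, with EltSize == 4 on a little-endian subtarget, a splat of
    // element 0 of a v4i32 vector yields index 3: PPC mnemonics count elements
    // from the most significant (left) end of the register, so the LE element
    // numbering is reversed.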

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted. The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
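
    // For example, the v8i16 constant <5, 5, 5, 5, 5, 5, 5, 5> can be
    // materialized with "vspltish 5", so with ByteSize == 2 this would return
    // an SDValue holding the constant 5.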

    // Flags for computing the optimal addressing mode for loads and stores.
    enum MemOpFlags {
      MOF_None = 0,

      // Extension mode for integer loads.
      MOF_SExt = 1 << 0,
      MOF_ZExt = 1 << 1,
      MOF_NoExt = 1 << 2,

      // Address computation flags.
      MOF_NotAddNorCst = 1 << 5,      // Not const. or sum of ptr and scalar.
      MOF_RPlusSImm16 = 1 << 6,      // Reg plus signed 16-bit constant.
      MOF_RPlusLo = 1 << 7,          // Reg plus signed 16-bit relocation.
      MOF_RPlusSImm16Mult4 = 1 << 8, // Reg plus 16-bit signed multiple of 4.
      MOF_RPlusSImm16Mult16 = 1 << 9, // Reg plus 16-bit signed multiple of 16.
      MOF_RPlusSImm34 = 1 << 10,     // Reg plus 34-bit signed constant.
      MOF_RPlusR = 1 << 11,          // Sum of two variables.
      MOF_PCRel = 1 << 12,           // PC-Relative relocation.
      MOF_AddrIsSImm32 = 1 << 13,    // A simple 32-bit constant.

      // The in-memory type.
      MOF_SubWordInt = 1 << 15,
      MOF_WordInt = 1 << 16,
      MOF_DoubleWordInt = 1 << 17,
      MOF_ScalarFloat = 1 << 18, // Scalar single or double precision.
      MOF_Vector = 1 << 19,      // Vector types and quad precision scalars.
      MOF_Vector256 = 1 << 20,

      // Subtarget features.
      MOF_SubtargetBeforeISA3_0 = 1 << 22, // A subtarget before ISA 3.0.
      MOF_SubtargetISA3_0 = 1 << 23,       // ISA 3.0.
      MOF_SubtargetP10 = 1 << 24,          // ISA 3.1 and Power10.
      MOF_SubtargetSPE = 1 << 25           // SPE.
    };
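
    // As a rough illustration, a word-sized load of the form
    // (load (add %X, 24)) would carry flags such as
    //   MOF_RPlusSImm16 | MOF_RPlusSImm16Mult4 | MOF_WordInt
    // (plus the active subtarget flags), letting it match either the D-form
    // or the DS-form addressing mode below.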

    // The addressing modes for loads and stores.
    enum AddrMode {
      AM_None,
      AM_DForm,
      AM_DSForm,
      AM_DQForm,
      AM_PrefixDForm,
      AM_XForm,
      AM_PCRelForm
    };
  } // end namespace PPC

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &Subtarget;

  public:
    explicit PPCTargetLowering(const PPCTargetMachine &TM,
                               const PPCSubtarget &STI);

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // PowerPC does not support scalar condition selects on vectors.
      return (Kind != SelectSupportKind::ScalarCondVectorVal);
    }

    /// getPreferredVectorAction - The code we generate when vector types are
    /// legalized by promoting the integer element type is often much worse
    /// than the code we generate if we widen the type for applicable vector
    /// types. The issue with promoting is that the vector is scalarized, the
    /// individual elements are promoted, and then the vector is rebuilt. So
    /// say we load a pair of v4i8's and shuffle them. This will turn into a
    /// mess of 8 extending loads, moves back into VSRs (or memory ops if we
    /// don't have moves) and then the VPERM for the shuffle. All in all a
    /// very slow sequence.
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
        const override {
      // Default handling for scalable and single-element vectors.
      if (VT.isScalableVector() || VT.getVectorNumElements() == 1)
        return TargetLoweringBase::getPreferredVectorAction(VT);

      // Split and promote vNi1 vectors so we don't produce v256i1/v512i1
      // types as those are only for MMA instructions.
      if (VT.getScalarSizeInBits() == 1 && VT.getSizeInBits() > 16)
        return TypeSplitVector;
      if (VT.getScalarSizeInBits() == 1)
        return TypePromoteInteger;

      // Widen vectors that have reasonably sized elements.
      if (VT.getScalarSizeInBits() % 8 == 0)
        return TypeWidenVector;
      return TargetLoweringBase::getPreferredVectorAction(VT);
    }
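
    // For example, v4i8 has byte-sized elements, so it is widened (toward
    // v16i8) rather than promoted to v4i32, avoiding the slow
    // scalarize/promote/rebuild sequence described above.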

    bool useSoftFloat() const override;

    bool hasSPE() const;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
      return MVT::i32;
    }

    bool isCheapToSpeculateCttz(Type *Ty) const override {
      return true;
    }

    bool isCheapToSpeculateCtlz(Type *Ty) const override {
      return true;
    }

    bool
    shallExtractConstSplatVectorElementToStore(Type *VectorTy,
                                               unsigned ElemSizeInBits,
                                               unsigned &Index) const override;

    bool isCtlzFast() const override {
      return true;
    }

    bool isEqualityCmpFoldedWithSignedCmp() const override {
      return false;
    }

    bool hasAndNotCompare(SDValue) const override {
      return true;
    }

    bool preferIncOfAddToSubOfNot(EVT VT) const override;

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                                 bool OptForSize, NegatibleCost &Cost,
                                 unsigned Depth = 0) const override;

    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    /// Return true if target always benefits from combining into FMA for a
    /// given value type. This must typically return false on targets where FMA
    /// takes more cycles to execute than FADD.
    bool enableAggressiveFMAFusion(EVT VT) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// SelectAddressEVXRegReg - Given the specified address, check to see if
    /// it can be more efficiently represented as [r+imm].
    bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index,
                                SelectionDAG &DAG) const;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
    /// is non-zero, only accept displacements that are not suitable for
    /// [r+imm]. Returns false if it can be represented by [r+imm], which is
    /// preferred.
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG,
                             MaybeAlign EncodingAlignment = std::nullopt) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg. If \p EncodingAlignment is
    /// non-zero, only accept displacements suitable for instruction encoding
    /// requirements, i.e. multiples of 4 for DS form.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG,
                             MaybeAlign EncodingAlignment) const;
    bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base,
                               SelectionDAG &DAG) const;
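
    // For example, (add %X, 20) is representable as [r+imm] with Base = %X
    // and Disp = 20. With EncodingAlignment == 4 (DS form) the displacement
    // 20 is acceptable, while a displacement of 18 would have to fall back
    // to reg+reg selection.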

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    /// SelectAddressPCRel - Returns true if the specified address can be
    /// represented as [pc+imm], i.e. as a PC-relative reference.
    bool SelectAddressPCRel(SDValue N, SDValue &Base) const;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          SmallVectorImpl<SDNode *> &Created) const override;

    Register getRegisterByName(const char *RegName, LLT VT,
                               const MachineFunction &MF) const override;

    void computeKnownBitsForTargetNode(const SDValue Op,
                                       KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    Align getPrefLoopAlignment(MachineLoop *ML) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true;
    }

    Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                          AtomicOrdering Ord) const override;

    Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                                AtomicOrdering Ord) const override;

    Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                  AtomicOrdering Ord) const override;
    Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                   AtomicOrdering Ord) const override;

    bool shouldInlineQuadwordAtomics() const;

    TargetLowering::AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

    TargetLowering::AtomicExpansionKind
    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

    Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                        AtomicRMWInst *AI, Value *AlignedAddr,
                                        Value *Incr, Value *Mask,
                                        Value *ShiftAmt,
                                        AtomicOrdering Ord) const override;
    Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                            AtomicCmpXchgInst *CI,
                                            Value *AlignedAddr, Value *CmpVal,
                                            Value *NewVal, Value *Mask,
                                            AtomicOrdering Ord) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                        MachineBasicBlock *MBB,
                                        unsigned AtomicSize,
                                        unsigned BinOpcode,
                                        unsigned CmpOpcode = 0,
                                        unsigned CmpPred = 0) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode,
                                                unsigned CmpOpcode = 0,
                                                unsigned CmpPred = 0) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    bool hasInlineStackProbe(const MachineFunction &MF) const override;

    unsigned getStackProbeSize(const MachineFunction &MF) const;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
        AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area.
    Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    InlineAsm::ConstraintCode
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "es")
        return InlineAsm::ConstraintCode::es;
      else if (ConstraintCode == "Q")
        return InlineAsm::ConstraintCode::Q;
      else if (ConstraintCode == "Z")
        return InlineAsm::ConstraintCode::Z;
      else if (ConstraintCode == "Zy")
        return InlineAsm::ConstraintCode::Zy;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
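
    // For example, GCC-style inline asm such as
    //   __asm__("lxvd2x %x0, %y1" : "=wa"(Val) : "Z"(*Ptr));
    // reaches this hook with ConstraintCode == "Z" and maps to the
    // corresponding memory-constraint code above.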

    void CollectTargetIntrinsicOperands(const CallInst &I,
                                        SmallVectorImpl<SDValue> &Ops,
                                        SelectionDAG &DAG) const override;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is a legal
    /// icmp immediate, that is, the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a legal
    /// add immediate, that is, the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2, e.g. on PPC it's free to truncate an i64 value in
    /// register X1 to i32 by referencing its sub-register R1.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;

    /// Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool convertSelectOfConstantsToMath(EVT VT) const override {
      return true;
    }

    bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                SDValue C) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc,
                                           EVT VT) const override {
      // Only handle float load/store pairs, because float (FPR) load/store
      // instructions take more cycles than integer (GPR) load/store on PPC.
      if (Opc != ISD::LOAD && Opc != ISD::STORE)
        return false;
      if (VT != MVT::f32 && VT != MVT::f64)
        return false;

      return true;
    }
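
    // For example, an f64 value that is merely loaded and stored (with no
    // floating-point arithmetic in between) can instead be moved through
    // GPRs as an i64, keeping it out of the FPRs entirely.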

    // Returns true if the address of the global is stored in a TOC entry.
    bool isAccessedAsGotIndirect(SDValue N) const;

    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                            const AttributeList &FuncAttributes) const override;

    /// Is unaligned memory access allowed for the given type, and is it fast
    /// relative to software emulation.
    bool allowsMisalignedMemoryAccesses(
        EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
        MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
        unsigned *Fast = nullptr) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true; otherwise fmuladd is
    /// expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                    EVT VT) const override;

    bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

    /// isProfitableToHoist - Check if it is profitable to hoist instruction
    /// \p I to its dominator block.
    /// For example, it is not profitable if \p I and its only user can form an
    /// FMA instruction, because PowerPC prefers FMADD.
    bool isProfitableToHoist(Instruction *I) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

    // Should we expand the build vector with shuffles?
    bool
    shouldExpandBuildVectorWithShuffles(EVT VT,
                                        unsigned DefinedValues) const override;

    // Keep the zero-extensions for arguments to libcalls.
    bool shouldKeepZExtForFP16Conv() const override { return true; }

    /// createFastISel - This method returns a target-specific FastISel object,
    /// or null if the target does not support "fast" instruction selection.
    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo) const override;

    /// Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg,
        const DataLayout &DL) const override {
      // We support any array type as "consecutive" block in the parameter
      // save area. The element type defines the alignment requirement and
      // whether the argument should go in GPRs, FPRs, or VRs if available.
      //
      // Note that clang uses this capability both to implement the ELFv2
      // homogeneous float/vector aggregate ABI, and to avoid having to use
      // "byval" when passing aggregates that might fully fit in registers.
      return Ty->isArrayTy();
    }
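
    // For example, clang lowers an ELFv2 homogeneous float aggregate such as
    //   struct Vec4 { float Elts[4]; };
    // to an IR argument of type [4 x float], which this hook causes to be
    // placed in consecutive FPRs when enough of them are free.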

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    Register
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    Register
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    /// Override to support customized stack guard loading.
    bool useLoadStackGuardNode(const Module &M) const override;

    bool isFPImmLegal(const APFloat &Imm, EVT VT,
                      bool ForCodeSize) const override;

    unsigned getJumpTableEncoding() const override;
    bool isJumpTableRelative() const override;
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const override;
    const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                               unsigned JTI,
                                               MCContext &Ctx) const override;

    /// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
    /// compute the address flags of the node, get the optimal address mode
    /// based on the flags, and set the Base and Disp based on the address mode.
    PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N,
                                        SDValue &Disp, SDValue &Base,
                                        SelectionDAG &DAG,
                                        MaybeAlign Align) const;

    /// SelectForceXFormMode - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation (an XForm instruction).
    PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base,
                                       SelectionDAG &DAG) const;

    bool splitValueIntoRegisterParts(
        SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
        unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
        const override;
    /// Structure that collects some common arguments that get passed around
    /// between the functions for call lowering.
    struct CallFlags {
      const CallingConv::ID CallConv;
      const bool IsTailCall : 1;
      const bool IsVarArg : 1;
      const bool IsPatchPoint : 1;
      const bool IsIndirect : 1;
      const bool HasNest : 1;
      const bool NoMerge : 1;

      CallFlags(CallingConv::ID CC, bool IsTailCall, bool IsVarArg,
                bool IsPatchPoint, bool IsIndirect, bool HasNest, bool NoMerge)
          : CallConv(CC), IsTailCall(IsTailCall), IsVarArg(IsVarArg),
            IsPatchPoint(IsPatchPoint), IsIndirect(IsIndirect),
            HasNest(HasNest), NoMerge(NoMerge) {}
    };
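
    // For example, a direct, non-variadic C call that is neither tail-called
    // nor a patch point would be described as:
    //   CallFlags CFlags(CallingConv::C, /*IsTailCall=*/false,
    //                    /*IsVarArg=*/false, /*IsPatchPoint=*/false,
    //                    /*IsIndirect=*/false, /*HasNest=*/false,
    //                    /*NoMerge=*/false);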

    CCAssignFn *ccAssignFnForCall(CallingConv::ID CC, bool Return,
                                  bool IsVarArg) const;
    bool supportsTailCallFor(const CallBase *CB) const;

    bool hasMultipleConditionRegisters(EVT VT) const override;

  private:
    struct ReuseLoadInfo {
      SDValue Ptr;
      SDValue Chain;
      SDValue ResChain;
      MachinePointerInfo MPI;
      bool IsDereferenceable = false;
      bool IsInvariant = false;
      Align Alignment;
      AAMDNodes AAInfo;
      const MDNode *Ranges = nullptr;

      ReuseLoadInfo() = default;

      MachineMemOperand::Flags MMOFlags() const {
        MachineMemOperand::Flags F = MachineMemOperand::MONone;
        if (IsDereferenceable)
          F |= MachineMemOperand::MODereferenceable;
        if (IsInvariant)
          F |= MachineMemOperand::MOInvariant;
        return F;
      }
    };

    // Map that relates a set of common address flags to PPC addressing modes.
    std::map<PPC::AddrMode, SmallVector<unsigned, 16>> AddrModesMap;
    void initializeAddrModeMap();

    bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                             SelectionDAG &DAG,
                             ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;

    void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    bool directMoveIsProfitable(const SDValue &Op) const;
    SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                 const SDLoc &dl) const;

    SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;

    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

    bool IsEligibleForTailCallOptimization(
        const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
        CallingConv::ID CallerCC, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins) const;

    bool IsEligibleForTailCallOptimization_64SVR4(
        const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
        CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
        const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
        bool isCalleeExternalSymbol) const;

    bool isEligibleForTCO(const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
                          CallingConv::ID CallerCC, const CallBase *CB,
                          bool isVarArg,
                          const SmallVectorImpl<ISD::OutputArg> &Outs,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const Function *CallerFunc,
                          bool isCalleeExternalSymbol) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                         SDValue Chain, SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         const SDLoc &dl) const;

    SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;

    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressAIX(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressLinux(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSSUBO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSADDO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                           const SDLoc &dl) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVPERM(SDValue Op, SelectionDAG &DAG, ArrayRef<int> PermMask,
                       EVT VT, SDValue V1, SDValue V2) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADDSUBO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUCMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
                           SelectionDAG &DAG) const;
    SDValue lowerLibCallBasedOnType(const char *LibCallFloatName,
                                    const char *LibCallDoubleName, SDValue Op,
                                    SelectionDAG &DAG) const;
    bool isLowringToMASSFiniteSafe(SDValue Op) const;
    bool isLowringToMASSSafe(SDValue Op) const;
    bool isScalarMASSConversionEnabled() const;
    SDValue lowerLibCallBase(const char *LibCallDoubleName,
                             const char *LibCallFloatName,
                             const char *LibCallDoubleNameFinite,
                             const char *LibCallFloatNameFinite, SDValue Op,
                             SelectionDAG &DAG) const;
    SDValue lowerPow(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerSin(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerCos(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerLog(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerLog10(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerExp(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVP_LOAD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVP_STORE(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDMFVectorLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDMFVectorStore(SDValue Op, SelectionDAG &DAG) const;
    SDValue DMFInsert1024(const SmallVectorImpl<SDValue> &Pairs,
                          const SDLoc &dl, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    SDValue FinishCall(CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                       SDValue InGlue, SDValue Chain, SDValue CallSeqStart,
                       SDValue &Callee, int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals,
                       const CallBase *CB) const;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;

    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context, const Type *RetTy) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                              SelectionDAG &DAG, SDValue ArgVal,
                              const SDLoc &dl) const;

    SDValue LowerFormalArguments_AIX(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_64SVR4(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_32SVR4(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

    SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                       SDValue CallSeqStart,
                                       ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                                       const SDLoc &dl) const;

    SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             const CallBase *CB) const;
    SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             const CallBase *CB) const;
    SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
                          const SmallVectorImpl<ISD::OutputArg> &Outs,
                          const SmallVectorImpl<SDValue> &OutVals,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals,
                          const CallBase *CB) const;

    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;

    SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineVectorShift(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                 SelectionDAG &DAG) const;
    SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
                                 DAGCombinerInfo &DCI) const;

    /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
    /// SETCC with integer subtraction when (1) there is a legal way of doing
    /// it and (2) keeping the result of the comparison in a GPR has a
    /// performance benefit.
    SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                            int &RefinementSteps, bool &UseOneConstNR,
                            bool Reciprocal) const override;
    SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                             int &RefinementSteps) const override;
    SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                             const DenormalMode &Mode) const override;
    SDValue getSqrtResultForDenormInput(SDValue Operand,
                                        SelectionDAG &DAG) const override;
    unsigned combineRepeatedFPDivisors() const override;

    SDValue
    combineElementTruncationToVectorTruncation(SDNode *N,
                                               DAGCombinerInfo &DCI) const;

    SDValue combineBVLoadsSpecialValue(SDValue Operand,
                                       SelectionDAG &DAG) const;

    /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
    /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
    /// essentially any shuffle of v8i16 vectors that just inserts one element
    /// from one vector into the other.
    SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

    /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
    /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
    /// essentially the v16i8 vector version of VINSERTH.
    SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

    /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
    /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1.
    SDValue lowerToXXSPLTI32DX(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

    // Return whether the call instruction can potentially be optimized to a
    // tail call. This will cause the optimizers to attempt to move, or
    // duplicate return instructions to help enable tail call optimizations.
    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

    /// getAddrModeForFlags - Based on the set of address flags, select the
    /// optimal instruction format to match.
    PPC::AddrMode getAddrModeForFlags(unsigned Flags) const;

    /// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
    /// the address flags of the load/store instruction that is to be matched.
    /// The address flags are stored in a map, which is then searched
    /// through to determine the optimal load/store instruction format.
    unsigned computeMOFlags(const SDNode *Parent, SDValue N,
                            SelectionDAG &DAG) const;
  }; // end class PPCTargetLowering

  namespace PPC {

    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo);

  } // end namespace PPC

  bool isIntS16Immediate(SDNode *N, int16_t &Imm);
  bool isIntS16Immediate(SDValue Op, int16_t &Imm);
  bool isIntS34Immediate(SDNode *N, int64_t &Imm);
  bool isIntS34Immediate(SDValue Op, int64_t &Imm);
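
  // For example, a ConstantSDNode holding 32000 satisfies isIntS16Immediate
  // (it fits in [-32768, 32767]), while 40000 fails it but still satisfies
  // isIntS34Immediate.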

  bool convertToNonDenormSingle(APInt &ArgAPInt);
  bool convertToNonDenormSingle(APFloat &ArgAPFloat);
  bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H