LLVM 22.0.0git
ARMISelLowering.h
Go to the documentation of this file.
1//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that ARM uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
15#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
16
19#include "llvm/ADT/StringRef.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/CallingConv.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/InlineAsm.h"
32#include <optional>
33#include <utility>
34
35namespace llvm {
36
38class ARMSubtarget;
39class DataLayout;
40class FastISel;
42class GlobalValue;
44class Instruction;
45class IRBuilderBase;
47class MachineInstr;
48class SelectionDAG;
50class TargetMachine;
52class VectorType;
53
54 namespace ARMISD {
55
56 // ARM Specific DAG Nodes
57 enum NodeType : unsigned {
58 // Start the numbering where the builtin ops and target ops leave off.
60
61 Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
62 // TargetExternalSymbol, and TargetGlobalAddress.
63 WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
64 // PIC mode.
65 WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
66
67 // Add pseudo op to model memcpy for struct byval.
69
70 CALL, // Function call.
71 CALL_PRED, // Function call that's predicable.
72 CALL_NOLINK, // Function call with branch not branch-and-link.
73 tSECALL, // CMSE non-secure function call.
74 t2CALL_BTI, // Thumb function call followed by BTI instruction.
75 BRCOND, // Conditional branch.
76 BR_JT, // Jumptable branch.
77 BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
78 RET_GLUE, // Return with a flag operand.
79 SERET_GLUE, // CMSE Entry function return with a flag operand.
80 INTRET_GLUE, // Interrupt return with an LR-offset and a flag operand.
81
82 PIC_ADD, // Add with a PC operand and a PIC label.
83
84 ASRL, // MVE long arithmetic shift right.
85 LSRL, // MVE long shift right.
86 LSLL, // MVE long shift left.
87
88 CMP, // ARM compare instructions.
89 CMN, // ARM CMN instructions.
90 CMPZ, // ARM compare that sets only Z flag.
91 CMPFP, // ARM VFP compare instruction, sets FPSCR.
92 CMPFPE, // ARM VFP signalling compare instruction, sets FPSCR.
93 CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
94 CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
95 // FPSCR.
96 FMSTAT, // ARM fmstat instruction.
97
98 CMOV, // ARM conditional move instructions.
99
100 SSAT, // Signed saturation
101 USAT, // Unsigned saturation
102
104
105 LSLS, // Flag-setting shift left.
106 LSRS1, // Flag-setting logical shift right by one bit.
107 ASRS1, // Flag-setting arithmetic shift right by one bit.
108 RRX, // Shift right one bit with carry in.
109
110 ADDC, // Add with carry
111 ADDE, // Add using carry
112 SUBC, // Sub with carry
113 SUBE, // Sub using carry
114
115 VMOVRRD, // double to two gprs.
116 VMOVDRR, // Two gprs to double.
117 VMOVSR, // move gpr to single, used for f32 literal constructed in a gpr
118
119 EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
120 EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
121 EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.
122
123 TC_RETURN, // Tail call return pseudo.
124
126
127 DYN_ALLOC, // Dynamic allocation on the stack.
128
129 MEMBARRIER_MCR, // Memory barrier (MCR)
130
131 PRELOAD, // Preload
132
133 WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
134 WIN__DBZCHK, // Windows' divide by zero check
135
136 WLS, // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
137 WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
138 LOOP_DEC, // Really a part of LE, performs the sub
139 LE, // Low-overhead loops, Loop End
140
141 PREDICATE_CAST, // Predicate cast for MVE i1 types
142 VECTOR_REG_CAST, // Reinterpret the current contents of a vector register
143
144 MVESEXT, // Legalization aids for extending a vector into two/four vectors.
145 MVEZEXT, // or truncating two/four vectors into one. Eventually becomes
146 MVETRUNC, // stack store/load sequence, if not optimized to anything else.
147
148 VCMP, // Vector compare.
149 VCMPZ, // Vector compare to zero.
150 VTST, // Vector test bits.
151
152 // Vector shift by vector
153 VSHLs, // ...left/right by signed
154 VSHLu, // ...left/right by unsigned
155
156 // Vector shift by immediate:
157 VSHLIMM, // ...left
158 VSHRsIMM, // ...right (signed)
159 VSHRuIMM, // ...right (unsigned)
160
161 // Vector rounding shift by immediate:
162 VRSHRsIMM, // ...right (signed)
163 VRSHRuIMM, // ...right (unsigned)
164 VRSHRNIMM, // ...right narrow
165
166 // Vector saturating shift by immediate:
167 VQSHLsIMM, // ...left (signed)
168 VQSHLuIMM, // ...left (unsigned)
169 VQSHLsuIMM, // ...left (signed to unsigned)
170 VQSHRNsIMM, // ...right narrow (signed)
171 VQSHRNuIMM, // ...right narrow (unsigned)
172 VQSHRNsuIMM, // ...right narrow (signed to unsigned)
173
174 // Vector saturating rounding shift by immediate:
175 VQRSHRNsIMM, // ...right narrow (signed)
176 VQRSHRNuIMM, // ...right narrow (unsigned)
177 VQRSHRNsuIMM, // ...right narrow (signed to unsigned)
178
179 // Vector shift and insert:
180 VSLIIMM, // ...left
181 VSRIIMM, // ...right
182
183 // Vector get lane (VMOV scalar to ARM core register)
184 // (These are used for 8- and 16-bit element types only.)
185 VGETLANEu, // zero-extend vector extract element
186 VGETLANEs, // sign-extend vector extract element
187
188 // Vector move immediate and move negated immediate:
191
192 // Vector move f32 immediate:
194
195 // Move H <-> R, clearing top 16 bits
198
199 // Vector duplicate:
202
203 // Vector shuffles:
204 VEXT, // extract
205 VREV64, // reverse elements within 64-bit doublewords
206 VREV32, // reverse elements within 32-bit words
207 VREV16, // reverse elements within 16-bit halfwords
208 VZIP, // zip (interleave)
209 VUZP, // unzip (deinterleave)
210 VTRN, // transpose
211 VTBL1, // 1-register shuffle with mask
212 VTBL2, // 2-register shuffle with mask
213 VMOVN, // MVE vmovn
214
215 // MVE Saturating truncates
216 VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
217 VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)
218
219 // MVE float <> half converts
220 VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
221 // lanes
222 VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes
223
224 // MVE VIDUP instruction, taking a start value and increment.
226
227 // Vector multiply long:
228 VMULLs, // ...signed
229 VMULLu, // ...unsigned
230
231 VQDMULH, // MVE vqdmulh instruction
232
233 // MVE reductions
234 VADDVs, // sign- or zero-extend the elements of a vector to i32,
235 VADDVu, // add them all together, and return an i32 of their sum
236 VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
238 VADDLVs, // sign- or zero-extend elements to i64 and sum, returning
239 VADDLVu, // the low and high 32-bit halves of the sum
240 VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
241 VADDLVAu, // provided as low and high halves
242 VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
244 VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
246 VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
247 VMLAVu, // them and add the results together, returning an i32 of the sum
248 VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
250 VMLALVs, // Same as VMLAV but with i64, returning the low and
251 VMLALVu, // high 32-bit halves of the sum
252 VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
254 VMLALVAs, // Same as VMLALV but also add an input accumulator
255 VMLALVAu, // provided as low and high halves
256 VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
258 VMINVu, // Find minimum unsigned value of a vector and register
259 VMINVs, // Find minimum signed value of a vector and register
260 VMAXVu, // Find maximum unsigned value of a vector and register
261 VMAXVs, // Find maximum signed value of a vector and register
262
263 SMULWB, // Signed multiply word by half word, bottom
264 SMULWT, // Signed multiply word by half word, top
265 UMLAL, // 64bit Unsigned Accumulate Multiply
266 SMLAL, // 64bit Signed Accumulate Multiply
267 UMAAL, // 64-bit Unsigned Accumulate Accumulate Multiply
268 SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
269 SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
270 SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
271 SMLALTT, // 64-bit signed accumulate multiply top, top 16
272 SMLALD, // Signed multiply accumulate long dual
273 SMLALDX, // Signed multiply accumulate long dual exchange
274 SMLSLD, // Signed multiply subtract long dual
275 SMLSLDX, // Signed multiply subtract long dual exchange
276 SMMLAR, // Signed multiply long, round and add
277 SMMLSR, // Signed multiply long, subtract and round
278
279 // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
280 // stands for.
289
290 // Operands of the standard BUILD_VECTOR node are not legalized, which
291 // is fine if BUILD_VECTORs are always lowered to shuffles or other
292 // operations, but for ARM some BUILD_VECTORs are legal as-is and their
293 // operands need to be legalized. Define an ARM-specific version of
294 // BUILD_VECTOR for this purpose.
296
297 // Bit-field insert
299
300 // Vector OR with immediate
302 // Vector AND with NOT of immediate
304
305 // Pseudo vector bitwise select
307
308 // Pseudo-instruction representing a memory copy using ldm/stm
309 // instructions.
311
312 // Pseudo-instruction representing a memory copy using a tail predicated
313 // loop
315 // Pseudo-instruction representing a memset using a tail predicated
316 // loop
318
319 // V8.1MMainline condition select
320 CSINV, // Conditional select invert.
321 CSNEG, // Conditional select negate.
322 CSINC, // Conditional select increment.
323
324 // Vector load N-element structure to all lanes:
330
331 // NEON loads with post-increment base updates:
346
347 // NEON stores with post-increment base updates:
358
359 // Load/Store of dual registers
363 };
364
365 } // end namespace ARMISD
366
namespace ARM {

  /// Possible values of the current rounding mode, which is specified in
  /// bits 23:22 of FPSCR.
  enum Rounding {
    RN = 0,    // Round to Nearest
    RP = 1,    // Round towards Plus infinity
    RM = 2,    // Round towards Minus infinity
    RZ = 3,    // Round towards Zero
    rmMask = 3 // Bit mask selecting rounding mode
  };

  // Bit position of the rounding mode bits (23:22) in FPSCR.
  const unsigned RoundingBitsPos = 22;

  // Bits of floating-point status: the NZCV flags, the QC bit and the
  // cumulative FP exception bits.
  const unsigned FPStatusBits = 0xf800009f;

  // Some bits in the FPSCR are not yet defined.  They must be preserved
  // when modifying the contents.
  const unsigned FPReservedBits = 0x00006060;

} // namespace ARM
389
/// Define some predicates that are used for node matching.
namespace ARM {

  /// Predicate used for node matching; the definition lives in
  /// ARMISelLowering.cpp.  NOTE(review): presumably tests whether the
  /// complement of \p v is a contiguous bitfield mask -- confirm against
  /// the implementation.
  bool isBitFieldInvertedMask(unsigned v);

} // end namespace ARM
396
397 //===--------------------------------------------------------------------===//
398 // ARMTargetLowering - ARM Implementation of the TargetLowering interface
399
401 // Copying needed for an outgoing byval argument.
402 enum ByValCopyKind {
403 // Argument is already in the correct location, no copy needed.
404 NoCopy,
405 // Argument value is currently in the local stack frame, needs copying to
406 // outgoing arguemnt area.
407 CopyOnce,
408 // Argument value is currently in the outgoing argument area, but not at
409 // the correct offset, so needs copying via a temporary in local stack
410 // space.
411 CopyViaTemp,
412 };
413
414 public:
415 explicit ARMTargetLowering(const TargetMachine &TM,
416 const ARMSubtarget &STI);
417
418 const ARMBaseTargetMachine &getTM() const;
419
420 unsigned getJumpTableEncoding() const override;
421 bool useSoftFloat() const override;
422
423 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
424
425 /// ReplaceNodeResults - Replace the results of node with an illegal result
426 /// type with new values built out of custom code.
428 SelectionDAG &DAG) const override;
429
430 const char *getTargetNodeName(unsigned Opcode) const override;
431
432 bool isSelectSupported(SelectSupportKind Kind) const override {
433 // ARM does not support scalar condition selects on vectors.
434 return (Kind != ScalarCondVectorVal);
435 }
436
437 bool isReadOnly(const GlobalValue *GV) const;
438
439 /// getSetCCResultType - Return the value type to use for ISD::SETCC.
441 EVT VT) const override;
442
445 MachineBasicBlock *MBB) const override;
446
448 SDNode *Node) const override;
449
450 bool supportKCFIBundles() const override;
451
454 const TargetInstrInfo *TII) const override;
455
459 SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
460 SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
461 SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
462 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
463
465 const APInt &OriginalDemandedBits,
466 const APInt &OriginalDemandedElts,
467 KnownBits &Known,
468 TargetLoweringOpt &TLO,
469 unsigned Depth) const override;
470
471 bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;
472
473 /// allowsMisalignedMemoryAccesses - Returns true if the target allows
474 /// unaligned memory accesses of the specified type. Returns whether it
475 /// is "fast" by reference in the second argument.
476 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
477 Align Alignment,
479 unsigned *Fast) const override;
480
481 EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
482 const AttributeList &FuncAttributes) const override;
483
484 bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
485 bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
486 bool isZExtFree(SDValue Val, EVT VT2) const override;
487 Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const override;
488
489 bool isFNegFree(EVT VT) const override;
490
491 bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
492
493 bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
494
495
496 /// isLegalAddressingMode - Return true if the addressing mode represented
497 /// by AM is legal for this target, for a load/store of the specified type.
498 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
499 Type *Ty, unsigned AS,
500 Instruction *I = nullptr) const override;
501
502 bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
503
504 /// Returns true if the addressing mode representing by AM is legal
505 /// for the Thumb1 target, for a load/store of the specified type.
506 bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
507
508 /// isLegalICmpImmediate - Return true if the specified immediate is legal
509 /// icmp immediate, that is the target has icmp instructions which can
510 /// compare a register against the immediate without having to materialize
511 /// the immediate into a register.
512 bool isLegalICmpImmediate(int64_t Imm) const override;
513
514 /// isLegalAddImmediate - Return true if the specified immediate is legal
515 /// add immediate, that is the target has add instructions which can
516 /// add a register and the immediate without having to materialize
517 /// the immediate into a register.
518 bool isLegalAddImmediate(int64_t Imm) const override;
519
520 /// getPreIndexedAddressParts - returns true by value, base pointer and
521 /// offset pointer and addressing mode by reference if the node's address
522 /// can be legally represented as pre-indexed load / store address.
525 SelectionDAG &DAG) const override;
526
527 /// getPostIndexedAddressParts - returns true by value, base pointer and
528 /// offset pointer and addressing mode by reference if this node can be
529 /// combined with a load / store to form a post-indexed load / store.
532 SelectionDAG &DAG) const override;
533
535 const APInt &DemandedElts,
536 const SelectionDAG &DAG,
537 unsigned Depth) const override;
538
540 const APInt &DemandedElts,
541 TargetLoweringOpt &TLO) const override;
542
543 ConstraintType getConstraintType(StringRef Constraint) const override;
544
545 /// Examine constraint string and operand type and determine a weight value.
546 /// The operand object must already have been set up with the operand type.
548 AsmOperandInfo &info, const char *constraint) const override;
549
550 std::pair<unsigned, const TargetRegisterClass *>
552 StringRef Constraint, MVT VT) const override;
553
554 const char *LowerXConstraint(EVT ConstraintVT) const override;
555
556 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
557 /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
558 /// true it means one of the asm constraint of the inline asm instruction
559 /// being processed is 'm'.
561 std::vector<SDValue> &Ops,
562 SelectionDAG &DAG) const override;
563
565 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
566 if (ConstraintCode == "Q")
568 if (ConstraintCode.size() == 2) {
569 if (ConstraintCode[0] == 'U') {
570 switch(ConstraintCode[1]) {
571 default:
572 break;
573 case 'm':
575 case 'n':
577 case 'q':
579 case 's':
581 case 't':
583 case 'v':
585 case 'y':
587 }
588 }
589 }
590 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
591 }
592
593 const ARMSubtarget* getSubtarget() const {
594 return Subtarget;
595 }
596
597 /// getRegClassFor - Return the register class that should be used for the
598 /// specified value type.
599 const TargetRegisterClass *
600 getRegClassFor(MVT VT, bool isDivergent = false) const override;
601
602 bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
603 Align &PrefAlign) const override;
604
605 /// createFastISel - This method returns a target specific FastISel object,
606 /// or null if the target does not support "fast" ISel.
608 const TargetLibraryInfo *libInfo) const override;
609
611
612 bool preferZeroCompareBranch() const override { return true; }
613
614 bool preferSelectsOverBooleanArithmetic(EVT VT) const override;
615
616 bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
617
618 bool hasAndNotCompare(SDValue V) const override {
619 // We can use bics for any scalar.
620 return V.getValueType().isScalarInteger();
621 }
622
623 bool
624 isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
625 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
626
627 /// isFPImmLegal - Returns true if the target can instruction select the
628 /// specified FP immediate natively. If false, the legalizer will
629 /// materialize the FP immediate as a load from a constant pool.
630 bool isFPImmLegal(const APFloat &Imm, EVT VT,
631 bool ForCodeSize = false) const override;
632
633 bool getTgtMemIntrinsic(IntrinsicInfo &Info,
634 const CallInst &I,
635 MachineFunction &MF,
636 unsigned Intrinsic) const override;
637
638 /// Returns true if it is beneficial to convert a load of a constant
639 /// to just the constant itself.
641 Type *Ty) const override;
642
643 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
644 /// with this index.
645 bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
646 unsigned Index) const override;
647
648 bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
649 bool MathUsed) const override {
650 // Using overflow ops for overflow checks only should beneficial on ARM.
651 return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
652 }
653
654 bool shouldReassociateReduction(unsigned Opc, EVT VT) const override {
655 return Opc != ISD::VECREDUCE_ADD;
656 }
657
658 /// Returns true if an argument of type Ty needs to be passed in a
659 /// contiguous block of registers in calling convention CallConv.
661 Type *Ty, CallingConv::ID CallConv, bool isVarArg,
662 const DataLayout &DL) const override;
663
664 /// If a physical register, this returns the register that receives the
665 /// exception address on entry to an EH pad.
667 getExceptionPointerRegister(const Constant *PersonalityFn) const override;
668
669 /// If a physical register, this returns the register that receives the
670 /// exception typeid on entry to a landing pad.
672 getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
673
675 Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
676 AtomicOrdering Ord) const override;
677 Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
678 AtomicOrdering Ord) const override;
679
680 void
681 emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;
682
684 AtomicOrdering Ord) const override;
686 AtomicOrdering Ord) const override;
687
688 unsigned getMaxSupportedInterleaveFactor() const override;
689
690 bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
692 ArrayRef<unsigned> Indices, unsigned Factor,
693 const APInt &GapMask) const override;
694 bool lowerInterleavedStore(Instruction *Store, Value *Mask,
695 ShuffleVectorInst *SVI, unsigned Factor,
696 const APInt &GapMask) const override;
697
698 bool shouldInsertFencesForAtomic(const Instruction *I) const override;
700 shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
704 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
707
708 bool useLoadStackGuardNode(const Module &M) const override;
709
710 void insertSSPDeclarations(Module &M) const override;
711
712 bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
713 unsigned &Cost) const override;
714
716 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
717 bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override;
718
719 bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
720 const MachineFunction &MF) const override {
721 // Do not merge to larger than i32.
722 return (MemVT.getSizeInBits() <= 32);
723 }
724
725 bool isCheapToSpeculateCttz(Type *Ty) const override;
726 bool isCheapToSpeculateCtlz(Type *Ty) const override;
727
728 bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
729 return VT.isScalarInteger();
730 }
731
732 bool supportSwiftError() const override {
733 return true;
734 }
735
736 bool supportSplitCSR(MachineFunction *MF) const override {
738 MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
739 }
740
741 bool hasStandaloneRem(EVT VT) const override {
742 return HasStandaloneRem;
743 }
744
747 unsigned ExpansionFactor) const override;
748
749 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
750 CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;
751
752 /// Returns true if \p VecTy is a legal interleaved access type. This
753 /// function checks the vector element type and the overall width of the
754 /// vector.
755 bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
756 Align Alignment,
757 const DataLayout &DL) const;
758
760 SDValue ConstNode) const override;
761
762 bool alignLoopsWithOptSize() const override;
763
764 /// Returns the number of interleaved accesses that will be generated when
765 /// lowering accesses of the given type.
767 const DataLayout &DL) const;
768
769 void finalizeLowering(MachineFunction &MF) const override;
770
771 /// Return the correct alignment for the current calling convention.
773 const DataLayout &DL) const override;
774
776 CombineLevel Level) const override;
777
778 bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
779
780 bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;
781
782 /// Return true if it is profitable to fold a pair of shifts into a mask.
784 EVT VT = Y.getValueType();
785
786 if (VT.isVector())
787 return false;
788
789 return VT.getScalarSizeInBits() <= 32;
790 }
791
792 bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
793 unsigned SelectOpcode, SDValue X,
794 SDValue Y) const override;
795
796 bool preferIncOfAddToSubOfNot(EVT VT) const override;
797
798 bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
799
800 bool isComplexDeinterleavingSupported() const override;
802 ComplexDeinterleavingOperation Operation, Type *Ty) const override;
803
806 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
807 Value *Accumulator = nullptr) const override;
808
809 bool softPromoteHalfType() const override { return true; }
810
811 bool useFPRegsForHalfType() const override { return true; }
812
813 protected:
814 std::pair<const TargetRegisterClass *, uint8_t>
816 MVT VT) const override;
817
818 private:
819 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
820 /// make the right decision when generating code for different targets.
821 const ARMSubtarget *Subtarget;
822
824
825 const InstrItineraryData *Itins;
826
827 // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
828 // check.
829 bool InsertFencesForAtomic;
830
831 bool HasStandaloneRem = true;
832
833 void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
834 void addDRTypeForNEON(MVT VT);
835 void addQRTypeForNEON(MVT VT);
836 std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
837
838 using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;
839
840 void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
841 SDValue &Arg, RegsToPassVector &RegsToPass,
842 CCValAssign &VA, CCValAssign &NextVA,
843 SDValue &StackPtr,
844 SmallVectorImpl<SDValue> &MemOpChains,
845 bool IsTailCall,
846 int SPDiff) const;
847 SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
848 SDValue &Root, SelectionDAG &DAG,
849 const SDLoc &dl) const;
850
851 CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
852 bool isVarArg) const;
853 CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
854 bool isVarArg) const;
855 std::pair<SDValue, MachinePointerInfo>
856 computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
857 const CCValAssign &VA, SDValue StackPtr,
858 bool IsTailCall, int SPDiff) const;
859 ByValCopyKind ByValNeedsCopyForTailCall(SelectionDAG &DAG, SDValue Src,
860 SDValue Dst,
861 ISD::ArgFlagsTy Flags) const;
862 SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
863 SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
864 SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
865 SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
866 const ARMSubtarget *Subtarget) const;
867 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
868 const ARMSubtarget *Subtarget) const;
869 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
870 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
871 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
872 SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
873 SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
874 SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
875 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
876 SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
877 SelectionDAG &DAG) const;
878 SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
879 SelectionDAG &DAG,
880 TLSModel::Model model) const;
881 SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
882 SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
883 SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
884 SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
885 SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
886 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
893 SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
894 SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
895 SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
896 SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
897 SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
898 SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
899 SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
900 const ARMSubtarget *ST) const;
901 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
902 const ARMSubtarget *ST) const;
903 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
905 SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
906 SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
907 void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
909 SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
910 const ARMSubtarget *Subtarget) const;
911 SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
912 SDValue &Chain) const;
913 SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
915 SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
916 SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
917 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
918 SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
919 SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
920 SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
922 SelectionDAG &DAG) const;
923 SDValue LowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
924 SDValue LowerCMP(SDValue Op, SelectionDAG &DAG) const;
926
927 Register getRegisterByName(const char* RegName, LLT VT,
928 const MachineFunction &MF) const override;
929
930 SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
931 SmallVectorImpl<SDNode *> &Created) const override;
932
933 bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
934 EVT VT) const override;
935
936 SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
937 SDValue Val) const;
938 SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
939 MVT ValVT, SDValue Val) const;
940
941 SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
942
944 CallingConv::ID CallConv, bool isVarArg,
946 const SDLoc &dl, SelectionDAG &DAG,
947 SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
948 SDValue ThisVal, bool isCmseNSCall) const;
949
950 void initializeSplitCSR(MachineBasicBlock *Entry) const override;
951 void insertCopiesSplitCSR(
952 MachineBasicBlock *Entry,
953 const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
954
955 bool splitValueIntoRegisterParts(
956 SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
957 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
958 const override;
959
960 SDValue joinRegisterPartsIntoValue(
961 SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,
962 unsigned NumParts, MVT PartVT, EVT ValueVT,
963 std::optional<CallingConv::ID> CC) const override;
964
965 SDValue
966 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
968 const SDLoc &dl, SelectionDAG &DAG,
969 SmallVectorImpl<SDValue> &InVals) const override;
970
971 int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
972 SDValue &Chain, const Value *OrigArg,
973 unsigned InRegsParamRecordIdx, int ArgOffset,
974 unsigned ArgSize) const;
975
976 void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
977 const SDLoc &dl, SDValue &Chain,
978 unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
979 bool ForceMutable = false) const;
980
982 SmallVectorImpl<SDValue> &InVals) const override;
983
984 /// HandleByVal - Target-specific cleanup for ByVal support.
985 void HandleByVal(CCState *, unsigned &, Align) const override;
986
987 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
988 /// for tail call optimization. Targets which want to do tail call
989 /// optimization should implement this function.
990 bool IsEligibleForTailCallOptimization(
992 SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const;
993
994 bool CanLowerReturn(CallingConv::ID CallConv,
995 MachineFunction &MF, bool isVarArg,
997 LLVMContext &Context, const Type *RetTy) const override;
998
999 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1001 const SmallVectorImpl<SDValue> &OutVals,
1002 const SDLoc &dl, SelectionDAG &DAG) const override;
1003
1004 bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
1005
1006 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
1007
1008 bool shouldConsiderGEPOffsetSplit() const override { return true; }
1009
1010 bool isUnsupportedFloatingType(EVT VT) const;
1011
1013
1014 SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
1015 SDValue ARMcc, SDValue Flags, SelectionDAG &DAG) const;
1016 SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
1017 SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
1018 SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
1019 const SDLoc &dl, bool Signaling = false) const;
1020
1021 SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
1022
1023 void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
1024 MachineBasicBlock *DispatchBB, int FI) const;
1025
1026 void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;
1027
1028 MachineBasicBlock *EmitStructByval(MachineInstr &MI,
1029 MachineBasicBlock *MBB) const;
1030
1031 MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
1032 MachineBasicBlock *MBB) const;
1033 MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
1034 MachineBasicBlock *MBB) const;
1035 void addMVEVectorTypes(bool HasMVEFP);
1036 void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
1037 void setAllExpand(MVT VT);
1038 };
1039
1046
1047 namespace ARM {
1048
1049 FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
1050 const TargetLibraryInfo *libInfo);
1051
1052 } // end namespace ARM
1053
1054} // end namespace llvm
1055
1056#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG)
return SDValue()
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define I(x, y, z)
Definition MD5.cpp:58
Register const TargetRegisterInfo * TRI
PowerPC Reduce CR logical Operation
This file defines the SmallVector class.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
Class for arbitrary precision integers.
Definition APInt.h:78
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isReadOnly(const GlobalValue *GV) const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const override
Return the correct alignment for the current calling convention.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
const ARMSubtarget * getSubtarget() const
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Returns true if the addressing mode representing by AM is legal for the Thumb1 target,...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void finalizeLowering(MachineFunction &MF) const override
Execute target specific actions to finalize target lowering.
SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
bool useFPRegsForHalfType() const override
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &OriginalDemandedBits, const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved store into a vstN intrinsic.
bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override
Return true if Op can create undef or poison from non-undef & non-poison operands.
bool isSelectSupported(SelectSupportKind Kind) const override
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved load into a vldN intrinsic.
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
bool preferSelectsOverBooleanArithmetic(EVT VT) const override
Should we prefer selects to doing arithmetic on boolean types.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldReassociateReduction(unsigned Opc, EVT VT) const override
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Type * shouldConvertSplatType(ShuffleVectorInst *SVI) const override
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
Instruction * makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy, Align Alignment, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy,Idx).
bool preferZeroCompareBranch() const override
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare.
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool useSoftFloat() const override
bool alignLoopsWithOptSize() const override
Should loops be aligned even when the function is marked OptSize (but not MinSize).
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override
Return true if it is profitable to fold a pair of shifts into a mask.
const ARMBaseTargetMachine & getTM() const
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool softPromoteHalfType() const override
bool hasStandaloneRem(EVT VT) const override
Return true if the target can handle a standalone remainder operation.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
Class to represent fixed width SIMD vectors.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Itinerary data supplied by a subtarget to be used by a target.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Machine Value Type.
Instructions::iterator instr_iterator
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize tihs SHIFT instruction, with ExpansionFactor being the recu...
SelectSupportKind
Enum that describes what type of support for selects the target has.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
virtual bool shouldConsiderGEPOffsetSplit() const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
TargetLowering(const TargetLowering &)=delete
virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
Base class of all SIMD vector types.
Define some predicates that are used for node matching.
Definition ARMEHABI.h:25
bool isBitFieldInvertedMask(unsigned v)
const unsigned FPStatusBits
const unsigned FPReservedBits
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPSCR.
const unsigned RoundingBitsPos
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
InstructionCost Cost
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
AtomicOrdering
Atomic ordering for LLVM's memory model.
CombineLevel
Definition DAGCombine.h:15
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:157
This structure contains all information that is necessary for lowering calls.