LLVM 20.0.0git
ARMISelLowering.h
Go to the documentation of this file.
1//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that ARM uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
15#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
16
19#include "llvm/ADT/StringRef.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/CallingConv.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/InlineAsm.h"
32#include <optional>
33#include <utility>
34
35namespace llvm {
36
37class ARMSubtarget;
38class DataLayout;
39class FastISel;
40class FunctionLoweringInfo;
41class GlobalValue;
42class InstrItineraryData;
43class Instruction;
44class IRBuilderBase;
45class MachineBasicBlock;
46class MachineInstr;
47class SelectionDAG;
48class TargetLibraryInfo;
49class TargetMachine;
50class TargetRegisterInfo;
51class VectorType;
52
53 namespace ARMISD {
54
55 // ARM Specific DAG Nodes
// NOTE(review): this enum was recovered from a documentation rendering. The
// bare numbered lines inside it (e.g. 59, 68, 103, 125, 188-201, 225,
// 236-256, 280-359) mark enumerators that the extraction elided because they
// were rendered as hyperlinks; they must be restored from the original
// ARMISelLowering.h before this enum is usable.
56 enum NodeType : unsigned {
57 // Start the numbering where the builtin ops and target ops leave off.
59
60 Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
61 // TargetExternalSymbol, and TargetGlobalAddress.
62 WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
63 // PIC mode.
64 WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
65
66 // Add pseudo op to model memcpy for struct byval.
68
69 CALL, // Function call.
70 CALL_PRED, // Function call that's predicable.
71 CALL_NOLINK, // Function call with branch not branch-and-link.
72 tSECALL, // CMSE non-secure function call.
73 t2CALL_BTI, // Thumb function call followed by BTI instruction.
74 BRCOND, // Conditional branch.
75 BR_JT, // Jumptable branch.
76 BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
77 RET_GLUE, // Return with a flag operand.
78 SERET_GLUE, // CMSE Entry function return with a flag operand.
79 INTRET_GLUE, // Interrupt return with an LR-offset and a flag operand.
80
81 PIC_ADD, // Add with a PC operand and a PIC label.
82
83 ASRL, // MVE long arithmetic shift right.
84 LSRL, // MVE long shift right.
85 LSLL, // MVE long shift left.
86
87 CMP, // ARM compare instructions.
88 CMN, // ARM CMN instructions.
89 CMPZ, // ARM compare that sets only Z flag.
90 CMPFP, // ARM VFP compare instruction, sets FPSCR.
91 CMPFPE, // ARM VFP signalling compare instruction, sets FPSCR.
92 CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
93 CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
94 // FPSCR.
95 FMSTAT, // ARM fmstat instruction.
96
97 CMOV, // ARM conditional move instructions.
98
99 SSAT, // Signed saturation
100 USAT, // Unsigned saturation
101
103
104 LSLS, // Flag-setting shift left.
105 LSRS1, // Flag-setting logical shift right by one bit.
106 ASRS1, // Flag-setting arithmetic shift right by one bit.
107 RRX, // Shift right one bit with carry in.
108
109 ADDC, // Add with carry
110 ADDE, // Add using carry
111 SUBC, // Sub with carry
112 SUBE, // Sub using carry
113
114 VMOVRRD, // double to two gprs.
115 VMOVDRR, // Two gprs to double.
116 VMOVSR, // move gpr to single, used for f32 literal constructed in a gpr
117
118 EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
119 EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
120 EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.
121
122 TC_RETURN, // Tail call return pseudo.
123
125
126 DYN_ALLOC, // Dynamic allocation on the stack.
127
128 MEMBARRIER_MCR, // Memory barrier (MCR)
129
130 PRELOAD, // Preload
131
132 WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
133 WIN__DBZCHK, // Windows' divide by zero check
134
135 WLS, // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
136 WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
137 LOOP_DEC, // Really a part of LE, performs the sub
138 LE, // Low-overhead loops, Loop End
139
140 PREDICATE_CAST, // Predicate cast for MVE i1 types
141 VECTOR_REG_CAST, // Reinterpret the current contents of a vector register
142
143 MVESEXT, // Legalization aids for extending a vector into two/four vectors.
144 MVEZEXT, // or truncating two/four vectors into one. Eventually becomes
145 MVETRUNC, // stack store/load sequence, if not optimized to anything else.
146
147 VCMP, // Vector compare.
148 VCMPZ, // Vector compare to zero.
149 VTST, // Vector test bits.
150
151 // Vector shift by vector
152 VSHLs, // ...left/right by signed
153 VSHLu, // ...left/right by unsigned
154
155 // Vector shift by immediate:
156 VSHLIMM, // ...left
157 VSHRsIMM, // ...right (signed)
158 VSHRuIMM, // ...right (unsigned)
159
160 // Vector rounding shift by immediate:
161 VRSHRsIMM, // ...right (signed)
162 VRSHRuIMM, // ...right (unsigned)
163 VRSHRNIMM, // ...right narrow
164
165 // Vector saturating shift by immediate:
166 VQSHLsIMM, // ...left (signed)
167 VQSHLuIMM, // ...left (unsigned)
168 VQSHLsuIMM, // ...left (signed to unsigned)
169 VQSHRNsIMM, // ...right narrow (signed)
170 VQSHRNuIMM, // ...right narrow (unsigned)
171 VQSHRNsuIMM, // ...right narrow (signed to unsigned)
172
173 // Vector saturating rounding shift by immediate:
174 VQRSHRNsIMM, // ...right narrow (signed)
175 VQRSHRNuIMM, // ...right narrow (unsigned)
176 VQRSHRNsuIMM, // ...right narrow (signed to unsigned)
177
178 // Vector shift and insert:
179 VSLIIMM, // ...left
180 VSRIIMM, // ...right
181
182 // Vector get lane (VMOV scalar to ARM core register)
183 // (These are used for 8- and 16-bit element types only.)
184 VGETLANEu, // zero-extend vector extract element
185 VGETLANEs, // sign-extend vector extract element
186
187 // Vector move immediate and move negated immediate:
// NOTE(review): enumerators elided here by the extraction (doc lines 188-189).
190
191 // Vector move f32 immediate:
// NOTE(review): enumerator elided here by the extraction (doc line 192).
193
194 // Move H <-> R, clearing top 16 bits
// NOTE(review): enumerators elided here by the extraction (doc lines 195-196).
197
198 // Vector duplicate:
// NOTE(review): enumerators elided here by the extraction (doc lines 199-200).
201
202 // Vector shuffles:
203 VEXT, // extract
204 VREV64, // reverse elements within 64-bit doublewords
205 VREV32, // reverse elements within 32-bit words
206 VREV16, // reverse elements within 16-bit halfwords
207 VZIP, // zip (interleave)
208 VUZP, // unzip (deinterleave)
209 VTRN, // transpose
210 VTBL1, // 1-register shuffle with mask
211 VTBL2, // 2-register shuffle with mask
212 VMOVN, // MVE vmovn
213
214 // MVE Saturating truncates
215 VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
216 VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)
217
218 // MVE float <> half converts
219 VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
220 // lanes
221 VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes
222
223 // MVE VIDUP instruction, taking a start value and increment.
// NOTE(review): enumerator elided here by the extraction (doc line 224).
225
226 // Vector multiply long:
227 VMULLs, // ...signed
228 VMULLu, // ...unsigned
229
230 VQDMULH, // MVE vqdmulh instruction
231
232 // MVE reductions
233 VADDVs, // sign- or zero-extend the elements of a vector to i32,
234 VADDVu, // add them all together, and return an i32 of their sum
235 VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
237 VADDLVs, // sign- or zero-extend elements to i64 and sum, returning
238 VADDLVu, // the low and high 32-bit halves of the sum
239 VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
240 VADDLVAu, // provided as low and high halves
241 VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
243 VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
245 VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
246 VMLAVu, // them and add the results together, returning an i32 of the sum
247 VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
249 VMLALVs, // Same as VMLAV but with i64, returning the low and
250 VMLALVu, // high 32-bit halves of the sum
251 VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
253 VMLALVAs, // Same as VMLALV but also add an input accumulator
254 VMLALVAu, // provided as low and high halves
255 VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
257 VMINVu, // Find minimum unsigned value of a vector and register
258 VMINVs, // Find minimum signed value of a vector and register
259 VMAXVu, // Find maximum unsigned value of a vector and register
260 VMAXVs, // Find maximum signed value of a vector and register
261
262 SMULWB, // Signed multiply word by half word, bottom
263 SMULWT, // Signed multiply word by half word, top
264 UMLAL, // 64bit Unsigned Accumulate Multiply
265 SMLAL, // 64bit Signed Accumulate Multiply
266 UMAAL, // 64-bit Unsigned Accumulate Accumulate Multiply
267 SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
268 SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
269 SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
270 SMLALTT, // 64-bit signed accumulate multiply top, top 16
271 SMLALD, // Signed multiply accumulate long dual
272 SMLALDX, // Signed multiply accumulate long dual exchange
273 SMLSLD, // Signed multiply subtract long dual
274 SMLSLDX, // Signed multiply subtract long dual exchange
275 SMMLAR, // Signed multiply long, round and add
276 SMMLSR, // Signed multiply long, subtract and round
277
278 // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
279 // stands for.
// NOTE(review): enumerators elided here by the extraction (doc lines 280-287).
288
289 // Operands of the standard BUILD_VECTOR node are not legalized, which
290 // is fine if BUILD_VECTORs are always lowered to shuffles or other
291 // operations, but for ARM some BUILD_VECTORs are legal as-is and their
292 // operands need to be legalized. Define an ARM-specific version of
293 // BUILD_VECTOR for this purpose.
295
296 // Bit-field insert
298
299 // Vector OR with immediate
301 // Vector AND with NOT of immediate
303
304 // Pseudo vector bitwise select
306
307 // Pseudo-instruction representing a memory copy using ldm/stm
308 // instructions.
310
311 // Pseudo-instruction representing a memory copy using a tail predicated
312 // loop
314 // Pseudo-instruction representing a memset using a tail predicated
315 // loop
317
318 // V8.1-M Mainline condition select
319 CSINV, // Conditional select invert.
320 CSNEG, // Conditional select negate.
321 CSINC, // Conditional select increment.
322
323 // Vector load N-element structure to all lanes:
328
329 // NEON loads with post-increment base updates:
344
345 // NEON stores with post-increment base updates:
356
357 // Load/Store of dual registers
359 STRD
360 };
361
362 } // end namespace ARMISD
363
364 namespace ARM {
// FPSCR (floating-point status and control register) field helpers.
// NOTE(review): presumably consumed by the GET/SET_ROUNDING and FP-mode
// lowerings declared later in this header — confirm against the .cpp.
365 /// Possible values of current rounding mode, which is specified in bits
366 /// 23:22 of FPSCR.
367 enum Rounding {
368 RN = 0, // Round to Nearest
369 RP = 1, // Round towards Plus infinity
370 RM = 2, // Round towards Minus infinity
371 RZ = 3, // Round towards Zero
372 rmMask = 3 // Bit mask selecting rounding mode
373 };
374
375 // Bit position of rounding mode bits in FPSCR.
376 const unsigned RoundingBitsPos = 22;
377
378 // Bits of floating-point status. These are NZCV flags, QC bit and cumulative
379 // FP exception bits.
380 const unsigned FPStatusBits = 0xf800009f;
381
382 // Some bits in the FPSCR are not yet defined. They must be preserved when
383 // modifying the contents.
384 const unsigned FPReservedBits = 0x00006060;
385 } // namespace ARM
386
387 /// Define some predicates that are used for node matching.
388 namespace ARM {
389
// Declaration only; the definition lives in ARMISelLowering.cpp.
// NOTE(review): semantics inferred from the name (tests whether ~v is a
// contiguous bitfield mask, as used by BFI/BFC matching) — confirm against
// the definition before relying on this in documentation.
390 bool isBitFieldInvertedMask(unsigned v);
391
392 } // end namespace ARM
393
394 //===--------------------------------------------------------------------===//
395 // ARMTargetLowering - ARM Implementation of the TargetLowering interface
396
398 // Copying needed for an outgoing byval argument.
399 enum ByValCopyKind {
400 // Argument is already in the correct location, no copy needed.
401 NoCopy,
402 // Argument value is currently in the local stack frame, needs copying to
403 // outgoing argument area.
404 CopyOnce,
405 // Argument value is currently in the outgoing argument area, but not at
406 // the correct offset, so needs copying via a temporary in local stack
407 // space.
408 CopyViaTemp,
409 };
410
411 public:
412 explicit ARMTargetLowering(const TargetMachine &TM,
413 const ARMSubtarget &STI);
414
415 unsigned getJumpTableEncoding() const override;
416 bool useSoftFloat() const override;
417
418 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
419
420 /// ReplaceNodeResults - Replace the results of node with an illegal result
421 /// type with new values built out of custom code.
423 SelectionDAG &DAG) const override;
424
425 const char *getTargetNodeName(unsigned Opcode) const override;
426
/// Every SelectSupportKind is supported except ScalarCondVectorVal
/// (a vector select driven by a scalar condition).
427 bool isSelectSupported(SelectSupportKind Kind) const override {
428 // ARM does not support scalar condition selects on vectors.
429 return (Kind != ScalarCondVectorVal);
430 }
431
432 bool isReadOnly(const GlobalValue *GV) const;
433
434 /// getSetCCResultType - Return the value type to use for ISD::SETCC.
436 EVT VT) const override;
437
440 MachineBasicBlock *MBB) const override;
441
443 SDNode *Node) const override;
444
448 SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
449 SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
450 SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
451 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
452
454 const APInt &OriginalDemandedBits,
455 const APInt &OriginalDemandedElts,
456 KnownBits &Known,
457 TargetLoweringOpt &TLO,
458 unsigned Depth) const override;
459
460 bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;
461
462 /// allowsMisalignedMemoryAccesses - Returns true if the target allows
463 /// unaligned memory accesses of the specified type. Returns whether it
464 /// is "fast" by reference in the second argument.
465 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
466 Align Alignment,
468 unsigned *Fast) const override;
469
471 const AttributeList &FuncAttributes) const override;
472
473 bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
474 bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
475 bool isZExtFree(SDValue Val, EVT VT2) const override;
476 Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const override;
477
478 bool isFNegFree(EVT VT) const override;
479
480 bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
481
482 bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
483
484
485 /// isLegalAddressingMode - Return true if the addressing mode represented
486 /// by AM is legal for this target, for a load/store of the specified type.
487 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
488 Type *Ty, unsigned AS,
489 Instruction *I = nullptr) const override;
490
491 bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
492
493 /// Returns true if the addressing mode representing by AM is legal
494 /// for the Thumb1 target, for a load/store of the specified type.
495 bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
496
497 /// isLegalICmpImmediate - Return true if the specified immediate is legal
498 /// icmp immediate, that is the target has icmp instructions which can
499 /// compare a register against the immediate without having to materialize
500 /// the immediate into a register.
501 bool isLegalICmpImmediate(int64_t Imm) const override;
502
503 /// isLegalAddImmediate - Return true if the specified immediate is legal
504 /// add immediate, that is the target has add instructions which can
505 /// add a register and the immediate without having to materialize
506 /// the immediate into a register.
507 bool isLegalAddImmediate(int64_t Imm) const override;
508
509 /// getPreIndexedAddressParts - returns true by value, base pointer and
510 /// offset pointer and addressing mode by reference if the node's address
511 /// can be legally represented as pre-indexed load / store address.
514 SelectionDAG &DAG) const override;
515
516 /// getPostIndexedAddressParts - returns true by value, base pointer and
517 /// offset pointer and addressing mode by reference if this node can be
518 /// combined with a load / store to form a post-indexed load / store.
521 SelectionDAG &DAG) const override;
522
524 const APInt &DemandedElts,
525 const SelectionDAG &DAG,
526 unsigned Depth) const override;
527
529 const APInt &DemandedElts,
530 TargetLoweringOpt &TLO) const override;
531
532 bool ExpandInlineAsm(CallInst *CI) const override;
533
534 ConstraintType getConstraintType(StringRef Constraint) const override;
535
536 /// Examine constraint string and operand type and determine a weight value.
537 /// The operand object must already have been set up with the operand type.
539 AsmOperandInfo &info, const char *constraint) const override;
540
541 std::pair<unsigned, const TargetRegisterClass *>
543 StringRef Constraint, MVT VT) const override;
544
545 const char *LowerXConstraint(EVT ConstraintVT) const override;
546
547 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
548 /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
549 /// true it means one of the asm constraint of the inline asm instruction
550 /// being processed is 'm'.
552 std::vector<SDValue> &Ops,
553 SelectionDAG &DAG) const override;
554
/// Map the inline-asm memory constraint strings "Q" and the two-letter
/// "U<m|n|q|s|t|v|y>" family to constraint codes, deferring everything else
/// to TargetLowering.
/// NOTE(review): this listing came from a documentation rendering; the
/// function's return type (the line above this one) and every per-case
/// `return InlineAsm::ConstraintCode::...` statement were elided by the
/// extraction and must be restored from the original header.
556 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
557 if (ConstraintCode == "Q")
559 if (ConstraintCode.size() == 2) {
560 if (ConstraintCode[0] == 'U') {
561 switch(ConstraintCode[1]) {
562 default:
563 break;
564 case 'm':
566 case 'n':
568 case 'q':
570 case 's':
572 case 't':
574 case 'v':
576 case 'y':
578 }
579 }
580 }
581 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
582 }
583
/// Non-owning accessor for the ARMSubtarget this lowering was built for
/// (the Subtarget member stored in the private section below).
584 const ARMSubtarget* getSubtarget() const {
585 return Subtarget;
586 }
587
588 /// getRegClassFor - Return the register class that should be used for the
589 /// specified value type.
590 const TargetRegisterClass *
591 getRegClassFor(MVT VT, bool isDivergent = false) const override;
592
593 bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
594 Align &PrefAlign) const override;
595
596 /// createFastISel - This method returns a target specific FastISel object,
597 /// or null if the target does not support "fast" ISel.
599 const TargetLibraryInfo *libInfo) const override;
600
602
/// Prefer branches formed from comparisons against zero.
603 bool preferZeroCompareBranch() const override { return true; }
604
605 bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
606
607 bool
608 isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
609 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
610
611 /// isFPImmLegal - Returns true if the target can instruction select the
612 /// specified FP immediate natively. If false, the legalizer will
613 /// materialize the FP immediate as a load from a constant pool.
614 bool isFPImmLegal(const APFloat &Imm, EVT VT,
615 bool ForCodeSize = false) const override;
616
617 bool getTgtMemIntrinsic(IntrinsicInfo &Info,
618 const CallInst &I,
619 MachineFunction &MF,
620 unsigned Intrinsic) const override;
621
622 /// Returns true if it is beneficial to convert a load of a constant
623 /// to just the constant itself.
625 Type *Ty) const override;
626
627 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
628 /// with this index.
629 bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
630 unsigned Index) const override;
631
632 bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
633 bool MathUsed) const override {
634 // Using overflow ops for overflow checks only should be beneficial on ARM.
// Deliberately forwards MathUsed as true so the base-class heuristic treats
// the arithmetic result as used even when only the overflow flag is needed.
635 return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
636 }
637
638 bool shouldReassociateReduction(unsigned Opc, EVT VT) const override {
// Permit reassociation for every vector reduction except VECREDUCE_ADD.
639 return Opc != ISD::VECREDUCE_ADD;
640 }
641
642 /// Returns true if an argument of type Ty needs to be passed in a
643 /// contiguous block of registers in calling convention CallConv.
645 Type *Ty, CallingConv::ID CallConv, bool isVarArg,
646 const DataLayout &DL) const override;
647
648 /// If a physical register, this returns the register that receives the
649 /// exception address on entry to an EH pad.
651 getExceptionPointerRegister(const Constant *PersonalityFn) const override;
652
653 /// If a physical register, this returns the register that receives the
654 /// exception typeid on entry to a landing pad.
656 getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
657
659 Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
660 AtomicOrdering Ord) const override;
662 AtomicOrdering Ord) const override;
663
664 void
665 emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;
666
668 AtomicOrdering Ord) const override;
670 AtomicOrdering Ord) const override;
671
672 unsigned getMaxSupportedInterleaveFactor() const override;
673
676 ArrayRef<unsigned> Indices,
677 unsigned Factor) const override;
679 unsigned Factor) const override;
680
681 bool shouldInsertFencesForAtomic(const Instruction *I) const override;
683 shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
685 shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
687 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
690
691 bool useLoadStackGuardNode(const Module &M) const override;
692
693 void insertSSPDeclarations(Module &M) const override;
694 Value *getSDagStackGuard(const Module &M) const override;
695 Function *getSSPStackGuardCheck(const Module &M) const override;
696
697 bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
698 unsigned &Cost) const override;
699
/// Allow store merging only while the merged memory type still fits in
/// 32 bits, regardless of address space.
700 bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
701 const MachineFunction &MF) const override {
702 // Do not merge to larger than i32.
703 return (MemVT.getSizeInBits() <= 32);
704 }
705
706 bool isCheapToSpeculateCttz(Type *Ty) const override;
707 bool isCheapToSpeculateCtlz(Type *Ty) const override;
708
/// Convert logic on setcc results to bitwise logic only for scalar
/// integer types.
709 bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
710 return VT.isScalarInteger();
711 }
712
/// swifterror argument handling is supported on ARM.
713 bool supportSwiftError() const override {
714 return true;
715 }
716
/// Whether a standalone remainder operation is available; answered from the
/// cached HasStandaloneRem member (declared below, defaulting to true),
/// irrespective of VT.
717 bool hasStandaloneRem(EVT VT) const override {
718 return HasStandaloneRem;
719 }
720
723 unsigned ExpansionFactor) const override;
724
725 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
726 CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;
727
728 /// Returns true if \p VecTy is a legal interleaved access type. This
729 /// function checks the vector element type and the overall width of the
730 /// vector.
731 bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
732 Align Alignment,
733 const DataLayout &DL) const;
734
736 SDValue ConstNode) const override;
737
738 bool alignLoopsWithOptSize() const override;
739
740 /// Returns the number of interleaved accesses that will be generated when
741 /// lowering accesses of the given type.
743 const DataLayout &DL) const;
744
745 void finalizeLowering(MachineFunction &MF) const override;
746
747 /// Return the correct alignment for the current calling convention.
749 const DataLayout &DL) const override;
750
752 CombineLevel Level) const override;
753
754 bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
755
757 CombineLevel Level) const override;
758
759 bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
760 EVT VT) const override;
761
762 bool preferIncOfAddToSubOfNot(EVT VT) const override;
763
764 bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
765
766 bool isComplexDeinterleavingSupported() const override;
768 ComplexDeinterleavingOperation Operation, Type *Ty) const override;
769
772 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
773 Value *Accumulator = nullptr) const override;
774
/// Half-precision (f16) values are soft-promoted during legalization.
775 bool softPromoteHalfType() const override { return true; }
776
/// Keep promoted half values in FP registers.
777 bool useFPRegsForHalfType() const override { return true; }
778
779 protected:
780 std::pair<const TargetRegisterClass *, uint8_t>
782 MVT VT) const override;
783
784 private:
785 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
786 /// make the right decision when generating code for different targets.
787 const ARMSubtarget *Subtarget;
788
790
791 const InstrItineraryData *Itins;
792
793 // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
794 // check.
795 bool InsertFencesForAtomic;
796
797 bool HasStandaloneRem = true;
798
799 void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
800 void addDRTypeForNEON(MVT VT);
801 void addQRTypeForNEON(MVT VT);
802 std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
803
804 using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;
805
806 void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
807 SDValue &Arg, RegsToPassVector &RegsToPass,
808 CCValAssign &VA, CCValAssign &NextVA,
809 SDValue &StackPtr,
810 SmallVectorImpl<SDValue> &MemOpChains,
811 bool IsTailCall,
812 int SPDiff) const;
813 SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
814 SDValue &Root, SelectionDAG &DAG,
815 const SDLoc &dl) const;
816
817 CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
818 bool isVarArg) const;
819 CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
820 bool isVarArg) const;
821 std::pair<SDValue, MachinePointerInfo>
822 computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
823 const CCValAssign &VA, SDValue StackPtr,
824 bool IsTailCall, int SPDiff) const;
825 ByValCopyKind ByValNeedsCopyForTailCall(SelectionDAG &DAG, SDValue Src,
826 SDValue Dst,
827 ISD::ArgFlagsTy Flags) const;
828 SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
829 SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
830 SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
831 SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
832 const ARMSubtarget *Subtarget) const;
833 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
834 const ARMSubtarget *Subtarget) const;
835 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
836 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
837 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
838 SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
839 SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
840 SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
841 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
842 SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
843 SelectionDAG &DAG) const;
844 SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
845 SelectionDAG &DAG,
846 TLSModel::Model model) const;
847 SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
848 SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
849 SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
850 SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
851 SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
852 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
853 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
854 SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
855 SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
856 SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
857 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
858 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
859 SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
860 SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
861 SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
862 SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
863 SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
864 SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
865 SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
866 const ARMSubtarget *ST) const;
867 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
868 const ARMSubtarget *ST) const;
869 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
870 SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
871 SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
872 SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
873 void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
875 SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
876 const ARMSubtarget *Subtarget) const;
877 SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
878 SDValue &Chain) const;
879 SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
880 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
881 SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
882 SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
883 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
884 SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
885 SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
886 SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
887 void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
888 SelectionDAG &DAG) const;
889 SDValue LowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
890
891 Register getRegisterByName(const char* RegName, LLT VT,
892 const MachineFunction &MF) const override;
893
894 SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
895 SmallVectorImpl<SDNode *> &Created) const override;
896
897 bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
898 EVT VT) const override;
899
900 SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
901 SDValue Val) const;
902 SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
903 MVT ValVT, SDValue Val) const;
904
905 SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
906
907 SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
908 CallingConv::ID CallConv, bool isVarArg,
910 const SDLoc &dl, SelectionDAG &DAG,
911 SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
912 SDValue ThisVal, bool isCmseNSCall) const;
913
/// NOTE(review): the first part of the return expression (documentation
/// line 915) was elided by the extraction; presumably it checks the
/// function's calling convention (CXX_FAST_TLS) before the NoUnwind
/// attribute test — restore from the original header and confirm.
914 bool supportSplitCSR(MachineFunction *MF) const override {
916 MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
917 }
918
919 void initializeSplitCSR(MachineBasicBlock *Entry) const override;
920 void insertCopiesSplitCSR(
921 MachineBasicBlock *Entry,
922 const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
923
924 bool splitValueIntoRegisterParts(
925 SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
926 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
927 const override;
928
929 SDValue joinRegisterPartsIntoValue(
930 SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,
931 unsigned NumParts, MVT PartVT, EVT ValueVT,
932 std::optional<CallingConv::ID> CC) const override;
933
934 SDValue
935 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
936 const SmallVectorImpl<ISD::InputArg> &Ins,
937 const SDLoc &dl, SelectionDAG &DAG,
938 SmallVectorImpl<SDValue> &InVals) const override;
939
940 int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
941 SDValue &Chain, const Value *OrigArg,
942 unsigned InRegsParamRecordIdx, int ArgOffset,
943 unsigned ArgSize) const;
944
945 void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
946 const SDLoc &dl, SDValue &Chain,
947 unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
948 bool ForceMutable = false) const;
949
950 SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
951 SmallVectorImpl<SDValue> &InVals) const override;
952
953 /// HandleByVal - Target-specific cleanup for ByVal support.
954 void HandleByVal(CCState *, unsigned &, Align) const override;
955
956 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
957 /// for tail call optimization. Targets which want to do tail call
958 /// optimization should implement this function.
959 bool IsEligibleForTailCallOptimization(
960 TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
961 SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const;
962
963 bool CanLowerReturn(CallingConv::ID CallConv,
964 MachineFunction &MF, bool isVarArg,
965 const SmallVectorImpl<ISD::OutputArg> &Outs,
966 LLVMContext &Context) const override;
967
968 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
969 const SmallVectorImpl<ISD::OutputArg> &Outs,
970 const SmallVectorImpl<SDValue> &OutVals,
971 const SDLoc &dl, SelectionDAG &DAG) const override;
972
973 bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
974
975 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
976
/// GEP offset splitting is always considered on ARM.
977 bool shouldConsiderGEPOffsetSplit() const override { return true; }
978
979 bool isUnsupportedFloatingType(EVT VT) const;
980
981 SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
982 SDValue ARMcc, SDValue Flags, SelectionDAG &DAG) const;
983 SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
984 SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
985 SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
986 const SDLoc &dl, bool Signaling = false) const;
987
988 SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
989
990 void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
991 MachineBasicBlock *DispatchBB, int FI) const;
992
993 void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;
994
995 MachineBasicBlock *EmitStructByval(MachineInstr &MI,
996 MachineBasicBlock *MBB) const;
997
998 MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
999 MachineBasicBlock *MBB) const;
1000 MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
1001 MachineBasicBlock *MBB) const;
1002 void addMVEVectorTypes(bool HasMVEFP);
1003 void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
1004 void setAllExpand(MVT VT);
1005 };
1006
1013
1014 namespace ARM {
1015
1016 FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
1017 const TargetLibraryInfo *libInfo);
1018
1019 } // end namespace ARM
1020
1021} // end namespace llvm
1022
1023#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint32_t Index
IRTranslator LLVM IR MI
#define RegName(no)
lazy value info
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
PowerPC Reduce CR logical Operation
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
bool isReadOnly(const GlobalValue *GV) const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const override
Return the correct alignment for the current calling convention.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
const ARMSubtarget * getSubtarget() const
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Returns true if the addressing mode representing by AM is legal for the Thumb1 target,...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void finalizeLowering(MachineFunction &MF) const override
Execute target specific actions to finalize target lowering.
SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
bool useFPRegsForHalfType() const override
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to.
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &OriginalDemandedBits, const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
bool isSelectSupported(SelectSupportKind Kind) const override
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldReassociateReduction(unsigned Opc, EVT VT) const override
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Type * shouldConvertSplatType(ShuffleVectorInst *SVI) const override
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
Instruction * makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy, Align Alignment, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
bool preferZeroCompareBranch() const override
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare.
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool useSoftFloat() const override
bool alignLoopsWithOptSize() const override
Should loops be aligned even when the function is marked OptSize (but not MinSize).
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool softPromoteHalfType() const override
bool hasStandaloneRem(EVT VT) const override
Return true if the target can handle a standalone remainder operation.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
CCValAssign - Represent assignment of one arg/retval to a location.
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:563
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
Itinerary data supplied by a subtarget to be used by a target.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:176
Machine Value Type.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Definition: MachineInstr.h:69
Flags
Flags values. These may be or'd together.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
Provides information about what library functions are available for the current target.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
SelectSupportKind
Enum that describes what type of support for selects the target has.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
bool isBitFieldInvertedMask(unsigned v)
const unsigned FPStatusBits
const unsigned FPReservedBits
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPSCR.
const unsigned RoundingBitsPos
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1490
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
Definition: ISDOpcodes.h:1444
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:1502
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1562
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1613
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CombineLevel
Definition: DAGCombine.h:15
DWARFExpression::Operation Op
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:157