SystemZISelLowering.h
1//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that SystemZ uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
15#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
16
17#include "SystemZ.h"
18#include "SystemZInstrInfo.h"
19#include "llvm/CodeGen/MachineBasicBlock.h"
20#include "llvm/CodeGen/SelectionDAG.h"
21#include "llvm/CodeGen/TargetLowering.h"
22#include <optional>
23
24namespace llvm {
25namespace SystemZISD {
26enum NodeType : unsigned {
27 FIRST_NUMBER = ISD::BUILTIN_OP_END,
28
29 // Return with a glue operand. Operand 0 is the chain operand.
31
32 // Calls a function. Operand 0 is the chain operand and operand 1
33 // is the target address. The arguments start at operand 2.
34 // There is an optional glue operand at the end.
37
38 // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
39 // (The call target is implicitly __tls_get_offset.)
42
43 // Wraps a TargetGlobalAddress that should be loaded using PC-relative
44 // accesses (LARL). Operand 0 is the address.
45 PCREL_WRAPPER,
46
47 // Used in cases where an offset is applied to a TargetGlobalAddress.
48 // Operand 0 is the full TargetGlobalAddress and operand 1 is a
49 // PCREL_WRAPPER for an anchor point. This is used so that we can
50 // cheaply refer to either the full address or the anchor point
51 // as a register base.
52 PCREL_OFFSET,
53
54 // Integer comparisons. There are three operands: the two values
55 // to compare, and an integer of type SystemZICMP.
57
58 // Floating-point comparisons. The two operands are the values to compare.
60
61 // Test under mask. The first operand is ANDed with the second operand
62 // and the condition codes are set on the result. The third operand is
63 // a boolean that is true if the condition codes need to distinguish
64 // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
65 // register forms do but the memory forms don't).
67
68 // Branches if a condition is true. Operand 0 is the chain operand;
69 // operand 1 is the 4-bit condition-code mask, with bit N in
70 // big-endian order meaning "branch if CC=N"; operand 2 is the
71 // target block and operand 3 is the flag operand.
73
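// For example, because bit N of the mask (counting from the MSB of the
// 4-bit value) means "branch if CC=N", a mask of 0b1010, i.e.
// SystemZ::CCMASK_0 | SystemZ::CCMASK_2, branches when CC is 0 or 2.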
74 // Selects between operand 0 and operand 1. Operand 2 is the
75 // mask of condition-code values for which operand 0 should be
76 // chosen over operand 1; it has the same form as BR_CCMASK.
77 // Operand 3 is the flag operand.
79
80 // Evaluates to the gap between the stack pointer and the
81 // base of the dynamically-allocatable area.
83
84 // For allocating stack space when using stack clash protector.
85 // Allocation is performed by block, and each block is probed.
87
88 // Count number of bits set in operand 0 per byte.
90
91 // Wrappers around the ISD opcodes of the same name. The output is GR128.
92 // Input operands may be GR64 or GR32, depending on the instruction.
97
98 // Add/subtract with overflow/carry. These have the same operands as
99 // the corresponding standard operations, except with the carry flag
100 // replaced by a condition code value.
102
103 // Set the condition code from a boolean value in operand 0.
104 // Operand 1 is a mask of all condition-code values that may result from
105 // this operation; operand 2 is a mask of condition-code values that may
106 // result if the boolean is true.
107 // Note that this operation is always optimized away; we never generate
108 // any code for it.
110
111 // Use a series of MVCs to copy bytes from one memory location to another.
112 // The operands are:
113 // - the target address
114 // - the source address
115 // - the constant length
116 //
117 // This isn't a memory opcode because we'd need to attach two
118 // MachineMemOperands rather than one.
120
121 // Similar to MVC, but for logic operations (AND, OR, XOR).
125
126 // Use CLC to compare two blocks of memory, with the same comments
127 // as for MVC.
129
130 // Use MVC to set a block of memory after storing the first byte.
132
133 // Use an MVST-based sequence to implement stpcpy().
135
136 // Use a CLST-based sequence to implement strcmp(). The two input operands
137 // are the addresses of the strings to compare.
139
140 // Use an SRST-based sequence to search a block of memory. The first
141 // operand is the end address, the second is the start, and the third
142 // is the character to search for. CC is set to 1 on success and 2
143 // on failure.
145
146 // Store the CC value in bits 29 and 28 of an integer.
148
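// For example, after this node the condition code can be recovered from the
// result as (Result >> SystemZ::IPM_CC) & 3, with SystemZ::IPM_CC being 28.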
149 // Transaction begin. The first operand is the chain, the second
150 // the TDB pointer, and the third the immediate control field.
151 // Returns CC value and chain.
154
155 // Transaction end. Just the chain operand. Returns CC value and chain.
157
158 // Create a vector constant by filling byte N of the result with bit
159 // 15-N of the single operand.
161
162 // Create a vector constant by replicating an element-sized RISBG-style mask.
163 // The first operand specifies the starting set bit and the second operand
164 // specifies the ending set bit. Both operands count from the MSB of the
165 // element.
167
168 // Replicate a GPR scalar value into all elements of a vector.
170
171 // Create a vector from two i64 GPRs.
173
174 // Replicate one element of a vector into all elements. The first operand
175 // is the vector and the second is the index of the element to replicate.
177
178 // Interleave elements from the high half of operand 0 and the high half
179 // of operand 1.
181
182 // Likewise for the low halves.
184
185 // Concatenate the vectors in the first two operands, shift them left
186 // by the third operand, and take the first half of the result.
188
189 // Take one element of the first v2i64 operand and one element of the
190 // second v2i64 operand and concatenate them to form a v2i64 result.
191 // The third operand is a 4-bit value of the form 0A0B, where A and B
192 // are the element selectors for the first and second operands,
193 // respectively.
195
196 // Perform a general vector permute on vector operands 0 and 1.
197 // Each byte of operand 2 controls the corresponding byte of the result,
198 // in the same way as a byte-level VECTOR_SHUFFLE mask.
200
201 // Pack vector operands 0 and 1 into a single vector with half-sized elements.
203
204 // Likewise, but saturate the result and set CC. PACKS_CC does signed
205 // saturation and PACKLS_CC does unsigned saturation.
208
209 // Unpack the first half of vector operand 0 into double-sized elements.
210 // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
213
214 // Likewise for the second half.
217
218 // Shift/rotate each element of vector operand 0 by the number of bits
219 // specified by scalar operand 1.
224
225 // For each element of the output type, sum across all sub-elements of
226 // operand 0 belonging to the corresponding element, and add in the
227 // rightmost sub-element of the corresponding element of operand 1.
229
230 // Compute carry/borrow indication for add/subtract.
232 // Add/subtract with carry/borrow.
234 // Compute carry/borrow indication for add/subtract with carry/borrow.
236
237 // Compare integer vector operands 0 and 1 to produce the usual 0/-1
238 // vector result. VICMPE is for equality, VICMPH for "signed greater than"
239 // and VICMPHL for "unsigned greater than".
243
244 // Likewise, but also set the condition codes on the result.
248
249 // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
250 // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
251 // greater than" and VFCMPHE for "ordered and greater than or equal to".
255
256 // Likewise, but also set the condition codes on the result.
260
261 // Test floating-point data class for vectors.
263
264 // Extend the even f32 elements of vector operand 0 to produce a vector
265 // of f64 elements.
267
268 // Round the f64 elements of vector operand 0 to f32s and store them in the
269 // even elements of the result.
271
272 // AND the two vector operands together and set CC based on the result.
274
275 // i128 high integer comparisons.
278
279 // String operations that set CC as a side-effect.
291
292 // Test Data Class.
293 //
294 // Operand 0: the value to test
295 // Operand 1: the bit mask
297
298 // z/OS XPLINK ADA Entry
299 // Wraps a TargetGlobalAddress that should be loaded from a function's
300 // AssociatedData Area (ADA). The ADA is passed to the function by the
301 // caller in the XPLINK ABI-defined register R5.
302 // Operand 0: the GlobalValue/External Symbol
303 // Operand 1: the ADA register
304 // Operand 2: the offset (0 for the first and 8 for the second element in the
305 // function descriptor)
307
308 // Strict variants of scalar floating-point comparisons.
309 // Quiet and signaling versions.
312
313 // Strict variants of vector floating-point comparisons.
314 // Quiet and signaling versions.
321
322 // Strict variants of VEXTEND and VROUND.
325
326 // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
327 // ATOMIC_LOAD_<op>.
328 //
329 // Operand 0: the address of the containing 32-bit-aligned field
330 // Operand 1: the second operand of <op>, in the high bits of an i32
331 // for everything except ATOMIC_SWAPW
332 // Operand 2: how many bits to rotate the i32 left to bring the first
333 // operand into the high bits
334 // Operand 3: the negative of operand 2, for rotating the other way
335 // Operand 4: the width of the field in bits (8 or 16)
347
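// Illustrative example (values for one particular field position): for an
// 8-bit field that is the second byte of its big-endian aligned word,
// operand 2 would be 8 and operand 3 would be -8 (24 modulo 32); rotating
// left by 8 brings the field into bits 31-24 and rotating by 24 restores it.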
348 // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
349 //
350 // Operand 0: the address of the containing 32-bit-aligned field
351 // Operand 1: the compare value, in the low bits of an i32
352 // Operand 2: the swap value, in the low bits of an i32
353 // Operand 3: how many bits to rotate the i32 left to bring the first
354 // operand into the high bits
355 // Operand 4: the negative of operand 3, for rotating the other way
356 // Operand 5: the width of the field in bits (8 or 16)
358
359 // Atomic compare-and-swap returning CC value.
360 // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
362
363 // 128-bit atomic load.
364 // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
366
367 // 128-bit atomic store.
368 // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
370
371 // 128-bit atomic compare-and-swap.
372 // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
374
375 // Byte swapping load/store. Same operands as regular load/store.
377
378 // Element swapping load/store. Same operands as regular load/store.
380
381 // Use STORE CLOCK FAST to store current TOD clock value.
383
384 // Prefetch from the second operand using the 4-bit control code in
385 // the first operand. The code is 1 for a load prefetch and 2 for
386 // a store prefetch.
389
390// Return true if OPCODE is some kind of PC-relative address.
391inline bool isPCREL(unsigned Opcode) {
392 return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
393}
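// A typical (hypothetical) use is to guard address-folding combines:
//   if (SystemZISD::isPCREL(Addr.getOpcode()))
//     ... Addr is a PCREL_WRAPPER or PCREL_OFFSET, i.e. a LARL-formed
//     ... address, so any folded displacement must keep it 2-byte aligned ...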
394} // end namespace SystemZISD
395
396namespace SystemZICMP {
397// Describes whether an integer comparison needs to be signed or unsigned,
398// or whether either type is OK.
399enum {
400 Any,
401 UnsignedOnly,
402 SignedOnly
403};
404} // end namespace SystemZICMP
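// For example, an ICMP node whose third operand is SystemZICMP::UnsignedOnly
// must be selected to an unsigned compare (a CLR/CLGR-style instruction),
// while SystemZICMP::Any leaves the signedness choice to the selector.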
405
406class SystemZSubtarget;
407
408class SystemZTargetLowering : public TargetLowering {
409public:
410 explicit SystemZTargetLowering(const TargetMachine &TM,
411 const SystemZSubtarget &STI);
412
413 bool useSoftFloat() const override;
414
415 // Override TargetLowering.
416 MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
417 return MVT::i32;
418 }
419 MVT getVectorIdxTy(const DataLayout &DL) const override {
420 // Only the lower 12 bits of an element index are used, so we don't
421 // want to clobber the upper 32 bits of a GPR unnecessarily.
422 return MVT::i32;
423 }
424 TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
425 const override {
426 // Widen subvectors to the full width rather than promoting integer
427 // elements. This is better because:
428 //
429 // (a) it means that we can handle the ABI for passing and returning
430 // sub-128 vectors without having to handle them as legal types.
431 //
432 // (b) we don't have instructions to extend on load and truncate on store,
433 // so promoting the integers is less efficient.
434 //
435 // (c) there are no multiplication instructions for the widest integer
436 // type (v2i64).
437 if (VT.getScalarSizeInBits() % 8 == 0)
438 return TypeWidenVector;
439 return TargetLoweringBase::getPreferredVectorAction(VT);
440 }
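// Illustrative consequence of the policy above: a v4i16 value is widened to
// v8i16 (one full 128-bit vector register) rather than promoted to v4i32.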
441 unsigned
442 getNumRegisters(LLVMContext &Context, EVT VT,
443 std::optional<MVT> RegisterVT) const override {
444 // i128 inline assembly operand.
445 if (VT == MVT::i128 && RegisterVT && *RegisterVT == MVT::Untyped)
446 return 1;
447 return TargetLowering::getNumRegisters(Context, VT);
448 }
449 MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
450 EVT VT) const override {
451 // 128-bit single-element vector types are passed like other vectors,
452 // not like their element type.
453 if (VT.isVector() && VT.getSizeInBits() == 128 &&
454 VT.getVectorNumElements() == 1)
455 return MVT::v16i8;
456 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
457 }
458 bool isCheapToSpeculateCtlz(Type *) const override { return true; }
459 bool isCheapToSpeculateCttz(Type *) const override { return true; }
460 bool preferZeroCompareBranch() const override { return true; }
461 bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override {
462 ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
463 return Mask && Mask->getValue().isIntN(16);
464 }
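// Illustrative: keeping (X & 0xFFFF) == 0 as a mask-and-compare lets it be
// selected to TEST UNDER MASK (the TMLL/TMLH forms take a 16-bit immediate),
// which is why only masks that fit in 16 bits are reported as beneficial.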
465 bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
466 return VT.isScalarInteger();
467 }
468 EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
469 EVT) const override;
470 bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
471 EVT VT) const override;
472 bool isFPImmLegal(const APFloat &Imm, EVT VT,
473 bool ForCodeSize) const override;
474 bool ShouldShrinkFPConstant(EVT VT) const override {
475 // Do not shrink 64-bit FP constpool entries since LDEB is slower than
476 // LD, and having the full constant in memory enables reg/mem opcodes.
477 return VT != MVT::f64;
478 }
480 MachineBasicBlock *MBB) const;
481
483 MachineBasicBlock *MBB) const;
484
485 bool hasInlineStackProbe(const MachineFunction &MF) const override;
489 shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override;
490 bool isLegalICmpImmediate(int64_t Imm) const override;
491 bool isLegalAddImmediate(int64_t Imm) const override;
492 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
493 unsigned AS,
494 Instruction *I = nullptr) const override;
495 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
497 unsigned *Fast) const override;
498 bool
499 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
500 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
501 const AttributeList &FuncAttributes) const override;
503 const AttributeList &FuncAttributes) const override;
504 bool isTruncateFree(Type *, Type *) const override;
505 bool isTruncateFree(EVT, EVT) const override;
506
507 bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
508 bool MathUsed) const override {
509 // Form add and sub with overflow intrinsics regardless of any extra
510 // users of the math result.
511 return VT == MVT::i32 || VT == MVT::i64;
512 }
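// Illustrative: for i32/i64 this keeps llvm.uadd.with.overflow and friends
// intact even when the sum has other users, since the logical add/subtract
// instructions set a condition code that already encodes the carry/borrow.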
513
514 bool shouldConsiderGEPOffsetSplit() const override { return true; }
515
516 bool shouldExpandCmpUsingSelects(EVT VT) const override { return true; }
517
518 const char *getTargetNodeName(unsigned Opcode) const override;
519 std::pair<unsigned, const TargetRegisterClass *>
521 StringRef Constraint, MVT VT) const override;
523 getConstraintType(StringRef Constraint) const override;
525 getSingleConstraintMatchWeight(AsmOperandInfo &info,
526 const char *constraint) const override;
528 std::vector<SDValue> &Ops,
529 SelectionDAG &DAG) const override;
530
531 InlineAsm::ConstraintCode
532 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
533 if (ConstraintCode.size() == 1) {
534 switch(ConstraintCode[0]) {
535 default:
536 break;
537 case 'o':
538 return InlineAsm::ConstraintCode::o;
539 case 'Q':
540 return InlineAsm::ConstraintCode::Q;
541 case 'R':
542 return InlineAsm::ConstraintCode::R;
543 case 'S':
544 return InlineAsm::ConstraintCode::S;
545 case 'T':
546 return InlineAsm::ConstraintCode::T;
547 }
548 } else if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'Z') {
549 switch (ConstraintCode[1]) {
550 default:
551 break;
552 case 'Q':
553 return InlineAsm::ConstraintCode::ZQ;
554 case 'R':
555 return InlineAsm::ConstraintCode::ZR;
556 case 'S':
557 return InlineAsm::ConstraintCode::ZS;
558 case 'T':
559 return InlineAsm::ConstraintCode::ZT;
560 }
561 }
562 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
563 }
564
565 Register getRegisterByName(const char *RegName, LLT VT,
566 const MachineFunction &MF) const override;
567
568 /// If a physical register, this returns the register that receives the
569 /// exception address on entry to an EH pad.
571 getExceptionPointerRegister(const Constant *PersonalityFn) const override;
572
573 /// If a physical register, this returns the register that receives the
574 /// exception typeid on entry to a landing pad.
576 getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
577
578 /// Override to support customized stack guard loading.
579 bool useLoadStackGuardNode(const Module &M) const override { return true; }
580 void insertSSPDeclarations(Module &M) const override {
581 }
582
585 MachineBasicBlock *BB) const override;
586 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
588 SelectionDAG &DAG) const override;
590 SelectionDAG &DAG) const override;
591 const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
592 bool allowTruncateForTailCall(Type *, Type *) const override;
593 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
595 SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
596 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
597 const override;
599 SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,
600 unsigned NumParts, MVT PartVT, EVT ValueVT,
601 std::optional<CallingConv::ID> CC) const override;
603 bool isVarArg,
605 const SDLoc &DL, SelectionDAG &DAG,
606 SmallVectorImpl<SDValue> &InVals) const override;
607 SDValue LowerCall(CallLoweringInfo &CLI,
608 SmallVectorImpl<SDValue> &InVals) const override;
609
610 std::pair<SDValue, SDValue>
611 makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName,
612 EVT RetVT, ArrayRef<SDValue> Ops, CallingConv::ID CallConv,
613 bool IsSigned, SDLoc DL, bool DoesNotReturn,
614 bool IsReturnValueUsed) const;
615
617 bool isVarArg,
619 LLVMContext &Context) const override;
620 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
622 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
623 SelectionDAG &DAG) const override;
624 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
625
626 /// Determine which of the bits specified in Mask are known to be either
627 /// zero or one and return them in the KnownZero/KnownOne bitsets.
629 KnownBits &Known,
630 const APInt &DemandedElts,
631 const SelectionDAG &DAG,
632 unsigned Depth = 0) const override;
633
634 /// Determine the number of bits in the operation that are sign bits.
636 const APInt &DemandedElts,
637 const SelectionDAG &DAG,
638 unsigned Depth) const override;
639
641 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
642 bool PoisonOnly, unsigned Depth) const override;
643
645 return ISD::ANY_EXTEND;
646 }
648 return ISD::ZERO_EXTEND;
649 }
650
651 bool supportSwiftError() const override {
652 return true;
653 }
654
655 unsigned getStackProbeSize(const MachineFunction &MF) const;
656
657private:
658 const SystemZSubtarget &Subtarget;
659
660 // Implement LowerOperation for individual opcodes.
661 SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
662 const SDLoc &DL, EVT VT,
663 SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
664 SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
665 EVT VT, ISD::CondCode CC,
666 SDValue CmpOp0, SDValue CmpOp1,
667 SDValue Chain = SDValue(),
668 bool IsSignaling = false) const;
669 SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
670 SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
671 bool IsSignaling) const;
672 SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
673 SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
674 SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
675 SelectionDAG &DAG) const;
676 SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
677 SelectionDAG &DAG, unsigned Opcode,
678 SDValue GOTOffset) const;
679 SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
680 SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
681 SelectionDAG &DAG) const;
682 SDValue lowerBlockAddress(BlockAddressSDNode *Node,
683 SelectionDAG &DAG) const;
684 SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
685 SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
686 SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
687 SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
688 SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
689 SDValue lowerVASTART_ELF(SDValue Op, SelectionDAG &DAG) const;
690 SDValue lowerVASTART_XPLINK(SDValue Op, SelectionDAG &DAG) const;
691 SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
692 SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
693 SDValue lowerDYNAMIC_STACKALLOC_ELF(SDValue Op, SelectionDAG &DAG) const;
694 SDValue lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op, SelectionDAG &DAG) const;
695 SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
696 SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
697 SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
698 SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
699 SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
700 SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
701 SDValue lowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
702 SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
703 SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
704 SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
705 SDValue lowerVECREDUCE_ADD(SDValue Op, SelectionDAG &DAG) const;
706 SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
707 SDValue lowerATOMIC_LDST_I128(SDValue Op, SelectionDAG &DAG) const;
708 SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
709 unsigned Opcode) const;
710 SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
711 SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
712 SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
713 SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
714 SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
715 SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
716 SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
717 bool isVectorElementLoad(SDValue Op) const;
718 SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
719 SmallVectorImpl<SDValue> &Elems) const;
720 SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
721 SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
722 SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
723 SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
724 SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
725 SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
726 SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
727 SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
728 SDValue lowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
729 SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
730 SDValue lowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
731
732 bool canTreatAsByteVector(EVT VT) const;
733 SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
734 unsigned Index, DAGCombinerInfo &DCI,
735 bool Force) const;
736 SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
737 DAGCombinerInfo &DCI) const;
738 SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
739 SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
740 SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
741 SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
742 bool canLoadStoreByteSwapped(EVT VT) const;
743 SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
744 SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
745 SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
746 SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
747 SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
748 SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
749 SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
750 SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
751 SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
752 SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
753 SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
754 SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
755 SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
756 SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;
757
758 SDValue unwrapAddress(SDValue N) const override;
759
760 // If the last instruction before MBBI in MBB was some form of COMPARE,
761 // try to replace it with a COMPARE AND BRANCH just before MBBI.
762 // CCMask and Target are the BRC-like operands for the branch.
763 // Return true if the change was made.
764 bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
766 unsigned CCMask,
768
769 // Implement EmitInstrWithCustomInserter for individual operation types.
770 MachineBasicBlock *emitAdjCallStack(MachineInstr &MI,
771 MachineBasicBlock *BB) const;
772 MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
774 unsigned StoreOpcode, unsigned STOCOpcode,
775 bool Invert) const;
777 bool Unsigned) const;
778 MachineBasicBlock *emitPair128(MachineInstr &MI,
779 MachineBasicBlock *MBB) const;
781 bool ClearEven) const;
782 MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
784 unsigned BinOpcode,
785 bool Invert = false) const;
786 MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
788 unsigned CompareOpcode,
789 unsigned KeepOldMask) const;
790 MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
791 MachineBasicBlock *BB) const;
792 MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
793 unsigned Opcode,
794 bool IsMemset = false) const;
795 MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
796 unsigned Opcode) const;
797 MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
799 unsigned Opcode, bool NoFloat) const;
800 MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
802 unsigned Opcode) const;
803 MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
804 MachineBasicBlock *MBB) const;
805
806 SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;
807
809 getTargetMMOFlags(const Instruction &I) const override;
810 const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
811
812 bool isFullyInternal(const Function *Fn) const;
813 void verifyNarrowIntegerArgs_Call(const SmallVectorImpl<ISD::OutputArg> &Outs,
814 const Function *F, SDValue Callee) const;
815 void verifyNarrowIntegerArgs_Ret(const SmallVectorImpl<ISD::OutputArg> &Outs,
816 const Function *F) const;
817 bool verifyNarrowIntegerArgs(const SmallVectorImpl<ISD::OutputArg> &Outs,
818 bool IsInternal) const;
819};
820
821struct SystemZVectorConstantInfo {
822private:
823 APInt IntBits; // The 128 bits as an integer.
824 APInt SplatBits; // Smallest splat value.
825 APInt SplatUndef; // Bits corresponding to undef operands of the BVN.
826 unsigned SplatBitSize = 0;
827 bool isFP128 = false;
828public:
829 unsigned Opcode = 0;
830 SmallVector<unsigned, 2> OpVals;
831 MVT VecVT;
832 SystemZVectorConstantInfo(APInt IntImm);
833 SystemZVectorConstantInfo(APFloat FPImm)
834 : SystemZVectorConstantInfo(FPImm.bitcastToAPInt()) {
835 isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
836 }
837 SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
838 bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
839};
840
841} // end namespace llvm
842
843#endif