Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1152, column 10
Called C++ object pointer is null
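
The warning points into SDValue::isMachineOpcode() in SelectionDAGNodes.h, which the analyzer reaches from InstrEmitter::getVR() in the listing below (path step 6, at source line 275). As a rough, hypothetical sketch of the pattern being reported (simplified stand-in names, not the real LLVM classes): an SDValue-style wrapper forwards such queries to the SDNode pointer it holds, so on any path where that pointer can be null the forwarded call dereferences null.

    // Simplified illustration only; types and members are stand-ins, not LLVM's.
    struct NodeSketch {
      bool isMachineOpcode() const { return false; }
    };
    struct ValueSketch {
      NodeSketch *Node = nullptr;        // may be null, e.g. default-constructed
      bool isMachineOpcode() const {
        return Node->isMachineOpcode();  // "Called C++ object pointer is null"
      }
    };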

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name InstrEmitter.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/CodeGen/SelectionDAG -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp

1//==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the Emit routines for the SelectionDAG class, which creates
10// MachineInstrs based on the decisions of the SelectionDAG instruction
11// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "InstrEmitter.h"
16#include "SDNodeDbgValue.h"
17#include "llvm/ADT/Statistic.h"
18#include "llvm/CodeGen/MachineConstantPool.h"
19#include "llvm/CodeGen/MachineFunction.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22#include "llvm/CodeGen/SelectionDAG.h"
23#include "llvm/CodeGen/StackMaps.h"
24#include "llvm/CodeGen/TargetInstrInfo.h"
25#include "llvm/CodeGen/TargetLowering.h"
26#include "llvm/CodeGen/TargetSubtargetInfo.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfo.h"
29#include "llvm/IR/PseudoProbe.h"
30#include "llvm/Support/Debug.h"
31#include "llvm/Support/ErrorHandling.h"
32#include "llvm/Support/MathExtras.h"
33#include "llvm/Target/TargetMachine.h"
34using namespace llvm;
35
36#define DEBUG_TYPE "instr-emitter"
37
38/// MinRCSize - Smallest register class we allow when constraining virtual
39/// registers. If satisfying all register class constraints would require
40/// using a smaller register class, emit a COPY to a new virtual register
41/// instead.
42const unsigned MinRCSize = 4;
43
44/// CountResults - The results of target nodes have register or immediate
45/// operands first, then an optional chain, and optional glue operands (which do
46/// not go into the resulting MachineInstr).
47unsigned InstrEmitter::CountResults(SDNode *Node) {
48 unsigned N = Node->getNumValues();
49 while (N && Node->getValueType(N - 1) == MVT::Glue)
50 --N;
51 if (N && Node->getValueType(N - 1) == MVT::Other)
52 --N; // Skip over chain result.
53 return N;
54}
55
56/// countOperands - The inputs to target nodes have any actual inputs first,
57/// followed by an optional chain operand, then an optional glue operand.
58/// Compute the number of actual operands that will go into the resulting
59/// MachineInstr.
60///
61/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
62/// the chain and glue. These operands may be implicit on the machine instr.
63static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
64 unsigned &NumImpUses) {
65 unsigned N = Node->getNumOperands();
66 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
67 --N;
68 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
69 --N; // Ignore chain if it exists.
70
71 // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
72 NumImpUses = N - NumExpUses;
73 for (unsigned I = N; I > NumExpUses; --I) {
74 if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
75 continue;
76 if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
77 if (Register::isPhysicalRegister(RN->getReg()))
78 continue;
79 NumImpUses = N - I;
80 break;
81 }
82
83 return N;
84}
85
86/// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
87/// implicit physical register output.
88void InstrEmitter::
89EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
90 Register SrcReg, DenseMap<SDValue, Register> &VRBaseMap) {
91 Register VRBase;
92 if (SrcReg.isVirtual()) {
93 // Just use the input register directly!
94 SDValue Op(Node, ResNo);
95 if (IsClone)
96 VRBaseMap.erase(Op);
97 bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
98 (void)isNew; // Silence compiler warning.
99 assert(isNew && "Node emitted out of order - early");
100 return;
101 }
102
103 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
104 // the CopyToReg'd destination register instead of creating a new vreg.
105 bool MatchReg = true;
106 const TargetRegisterClass *UseRC = nullptr;
107 MVT VT = Node->getSimpleValueType(ResNo);
108
109 // Stick to the preferred register classes for legal types.
110 if (TLI->isTypeLegal(VT))
111 UseRC = TLI->getRegClassFor(VT, Node->isDivergent());
112
113 if (!IsClone && !IsCloned)
114 for (SDNode *User : Node->uses()) {
115 bool Match = true;
116 if (User->getOpcode() == ISD::CopyToReg &&
117 User->getOperand(2).getNode() == Node &&
118 User->getOperand(2).getResNo() == ResNo) {
119 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
120 if (DestReg.isVirtual()) {
121 VRBase = DestReg;
122 Match = false;
123 } else if (DestReg != SrcReg)
124 Match = false;
125 } else {
126 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
127 SDValue Op = User->getOperand(i);
128 if (Op.getNode() != Node || Op.getResNo() != ResNo)
129 continue;
130 MVT VT = Node->getSimpleValueType(Op.getResNo());
131 if (VT == MVT::Other || VT == MVT::Glue)
132 continue;
133 Match = false;
134 if (User->isMachineOpcode()) {
135 const MCInstrDesc &II = TII->get(User->getMachineOpcode());
136 const TargetRegisterClass *RC = nullptr;
137 if (i+II.getNumDefs() < II.getNumOperands()) {
138 RC = TRI->getAllocatableClass(
139 TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
140 }
141 if (!UseRC)
142 UseRC = RC;
143 else if (RC) {
144 const TargetRegisterClass *ComRC =
145 TRI->getCommonSubClass(UseRC, RC);
146 // If multiple uses expect disjoint register classes, we emit
147 // copies in AddRegisterOperand.
148 if (ComRC)
149 UseRC = ComRC;
150 }
151 }
152 }
153 }
154 MatchReg &= Match;
155 if (VRBase)
156 break;
157 }
158
159 const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
160 SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
161
162 // Figure out the register class to create for the destreg.
163 if (VRBase) {
164 DstRC = MRI->getRegClass(VRBase);
165 } else if (UseRC) {
166 assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
167 "Incompatible phys register def and uses!");
168 DstRC = UseRC;
169 } else
170 DstRC = SrcRC;
171
172 // If all uses are reading from the src physical register and copying the
173 // register is either impossible or very expensive, then don't create a copy.
174 if (MatchReg && SrcRC->getCopyCost() < 0) {
175 VRBase = SrcReg;
176 } else {
177 // Create the reg, emit the copy.
178 VRBase = MRI->createVirtualRegister(DstRC);
179 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
180 VRBase).addReg(SrcReg);
181 }
182
183 SDValue Op(Node, ResNo);
184 if (IsClone)
185 VRBaseMap.erase(Op);
186 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
187 (void)isNew; // Silence compiler warning.
188 assert(isNew && "Node emitted out of order - early");
189}
190
191void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
192 MachineInstrBuilder &MIB,
193 const MCInstrDesc &II,
194 bool IsClone, bool IsCloned,
195 DenseMap<SDValue, Register> &VRBaseMap) {
196 assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
197 "IMPLICIT_DEF should have been handled as a special case elsewhere!");
198
199 unsigned NumResults = CountResults(Node);
200 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
201 II.isVariadic() && II.variadicOpsAreDefs();
202 unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
203 if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
204 NumVRegs = NumResults;
205 for (unsigned i = 0; i < NumVRegs; ++i) {
206 // If the specific node value is only used by a CopyToReg and the dest reg
207 // is a vreg in the same register class, use the CopyToReg'd destination
208 // register instead of creating a new vreg.
209 Register VRBase;
210 const TargetRegisterClass *RC =
211 TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
212 // Always let the value type influence the used register class. The
213 // constraints on the instruction may be too lax to represent the value
214 // type correctly. For example, a 64-bit float (X86::FR64) can't live in
215 // the 32-bit float super-class (X86::FR32).
216 if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
217 const TargetRegisterClass *VTRC = TLI->getRegClassFor(
218 Node->getSimpleValueType(i),
219 (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
220 if (RC)
221 VTRC = TRI->getCommonSubClass(RC, VTRC);
222 if (VTRC)
223 RC = VTRC;
224 }
225
226 if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
227 // Optional def must be a physical register.
228 VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
229 assert(VRBase.isPhysical());
230 MIB.addReg(VRBase, RegState::Define);
231 }
232
233 if (!VRBase && !IsClone && !IsCloned)
234 for (SDNode *User : Node->uses()) {
235 if (User->getOpcode() == ISD::CopyToReg &&
236 User->getOperand(2).getNode() == Node &&
237 User->getOperand(2).getResNo() == i) {
238 unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
239 if (Register::isVirtualRegister(Reg)) {
240 const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
241 if (RegRC == RC) {
242 VRBase = Reg;
243 MIB.addReg(VRBase, RegState::Define);
244 break;
245 }
246 }
247 }
248 }
249
250 // Create the result registers for this node and add the result regs to
251 // the machine instruction.
252 if (VRBase == 0) {
253 assert(RC && "Isn't a register operand!");
254 VRBase = MRI->createVirtualRegister(RC);
255 MIB.addReg(VRBase, RegState::Define);
256 }
257
258 // If this def corresponds to a result of the SDNode insert the VRBase into
259 // the lookup map.
260 if (i < NumResults) {
261 SDValue Op(Node, i);
262 if (IsClone)
263 VRBaseMap.erase(Op);
264 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
265 (void)isNew; // Silence compiler warning.
266 assert(isNew && "Node emitted out of order - early");
267 }
268 }
269}
270
271/// getVR - Return the virtual register corresponding to the specified result
272/// of the specified node.
273Register InstrEmitter::getVR(SDValue Op,
274 DenseMap<SDValue, Register> &VRBaseMap) {
275 if (Op.isMachineOpcode() &&
Step 6: Calling 'SDValue::isMachineOpcode'
276 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
277 // Add an IMPLICIT_DEF instruction before every use.
278 // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
279 // does not include operand register class info.
280 const TargetRegisterClass *RC = TLI->getRegClassFor(
281 Op.getSimpleValueType(), Op.getNode()->isDivergent());
282 Register VReg = MRI->createVirtualRegister(RC);
283 BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
284 TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
285 return VReg;
286 }
287
288 DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
289 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
290 return I->second;
291}
292
293
294/// AddRegisterOperand - Add the specified register as an operand to the
295/// specified machine instr. Insert register copies if the register is
296/// not in the required register class.
297void
298InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
299 SDValue Op,
300 unsigned IIOpNum,
301 const MCInstrDesc *II,
302 DenseMap<SDValue, Register> &VRBaseMap,
303 bool IsDebug, bool IsClone, bool IsCloned) {
304 assert(Op.getValueType() != MVT::Other &&
305 Op.getValueType() != MVT::Glue &&
306 "Chain and glue operands should occur at end of operand list!");
307 // Get/emit the operand.
308 Register VReg = getVR(Op, VRBaseMap);
309
310 const MCInstrDesc &MCID = MIB->getDesc();
311 bool isOptDef = IIOpNum < MCID.getNumOperands() &&
312 MCID.OpInfo[IIOpNum].isOptionalDef();
313
314 // If the instruction requires a register in a different class, create
315 // a new virtual register and copy the value into it, but first attempt to
316 // shrink VReg's register class within reason. For example, if VReg == GR32
317 // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
318 if (II) {
319 const TargetRegisterClass *OpRC = nullptr;
320 if (IIOpNum < II->getNumOperands())
321 OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);
322
323 if (OpRC) {
324 const TargetRegisterClass *ConstrainedRC
325 = MRI->constrainRegClass(VReg, OpRC, MinRCSize);
326 if (!ConstrainedRC) {
327 OpRC = TRI->getAllocatableClass(OpRC);
328 assert(OpRC && "Constraints cannot be fulfilled for allocation");
329 Register NewVReg = MRI->createVirtualRegister(OpRC);
330 BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
331 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
332 VReg = NewVReg;
333 } else {
334 assert(ConstrainedRC->isAllocatable() &&
335 "Constraining an allocatable VReg produced an unallocatable class?");
336 }
337 }
338 }
339
340 // If this value has only one use, that use is a kill. This is a
341 // conservative approximation. InstrEmitter does trivial coalescing
342 // with CopyFromReg nodes, so don't emit kill flags for them.
343 // Avoid kill flags on Schedule cloned nodes, since there will be
344 // multiple uses.
345 // Tied operands are never killed, so we need to check that. And that
346 // means we need to determine the index of the operand.
347 bool isKill = Op.hasOneUse() &&
348 Op.getNode()->getOpcode() != ISD::CopyFromReg &&
349 !IsDebug &&
350 !(IsClone || IsCloned);
351 if (isKill) {
352 unsigned Idx = MIB->getNumOperands();
353 while (Idx > 0 &&
354 MIB->getOperand(Idx-1).isReg() &&
355 MIB->getOperand(Idx-1).isImplicit())
356 --Idx;
357 bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
358 if (isTied)
359 isKill = false;
360 }
361
362 MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
363 getDebugRegState(IsDebug));
364}
365
366/// AddOperand - Add the specified operand to the specified machine instr. II
367/// specifies the instruction information for the node, and IIOpNum is the
368/// operand number (in the II) that we are adding.
369void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
370 SDValue Op,
371 unsigned IIOpNum,
372 const MCInstrDesc *II,
373 DenseMap<SDValue, Register> &VRBaseMap,
374 bool IsDebug, bool IsClone, bool IsCloned) {
375 if (Op.isMachineOpcode()) {
376 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
377 IsDebug, IsClone, IsCloned);
378 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
379 MIB.addImm(C->getSExtValue());
380 } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
381 MIB.addFPImm(F->getConstantFPValue());
382 } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
383 Register VReg = R->getReg();
384 MVT OpVT = Op.getSimpleValueType();
385 const TargetRegisterClass *IIRC =
386 II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
387 : nullptr;
388 const TargetRegisterClass *OpRC =
389 TLI->isTypeLegal(OpVT)
390 ? TLI->getRegClassFor(OpVT,
391 Op.getNode()->isDivergent() ||
392 (IIRC && TRI->isDivergentRegClass(IIRC)))
393 : nullptr;
394
395 if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
396 Register NewVReg = MRI->createVirtualRegister(IIRC);
397 BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
398 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
399 VReg = NewVReg;
400 }
401 // Turn additional physreg operands into implicit uses on non-variadic
402 // instructions. This is used by call and return instructions passing
403 // arguments in registers.
404 bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
405 MIB.addReg(VReg, getImplRegState(Imp));
406 } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
407 MIB.addRegMask(RM->getRegMask());
408 } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
409 MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
410 TGA->getTargetFlags());
411 } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
412 MIB.addMBB(BBNode->getBasicBlock());
413 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
414 MIB.addFrameIndex(FI->getIndex());
415 } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
416 MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
417 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
418 int Offset = CP->getOffset();
419 Align Alignment = CP->getAlign();
420
421 unsigned Idx;
422 MachineConstantPool *MCP = MF->getConstantPool();
423 if (CP->isMachineConstantPoolEntry())
424 Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
425 else
426 Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
427 MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
428 } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
429 MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
430 } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
431 MIB.addSym(SymNode->getMCSymbol());
432 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
433 MIB.addBlockAddress(BA->getBlockAddress(),
434 BA->getOffset(),
435 BA->getTargetFlags());
436 } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
437 MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
438 } else {
439 assert(Op.getValueType() != MVT::Other &&
440 Op.getValueType() != MVT::Glue &&
441 "Chain and glue operands should occur at end of operand list!");
442 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
443 IsDebug, IsClone, IsCloned);
444 }
445}
446
447Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
448 MVT VT, bool isDivergent, const DebugLoc &DL) {
449 const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
450 const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
451
452 // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
453 // within reason.
454 if (RC && RC != VRC)
455 RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
456
457 // VReg has been adjusted. It can be used with SubIdx operands now.
458 if (RC)
459 return VReg;
460
461 // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
462 // register instead.
463 RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
464 assert(RC && "No legal register class for VT supports that SubIdx");
465 Register NewReg = MRI->createVirtualRegister(RC);
466 BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
467 .addReg(VReg);
468 return NewReg;
469}
470
471/// EmitSubregNode - Generate machine code for subreg nodes.
472///
473void InstrEmitter::EmitSubregNode(SDNode *Node,
474 DenseMap<SDValue, Register> &VRBaseMap,
475 bool IsClone, bool IsCloned) {
476 Register VRBase;
477 unsigned Opc = Node->getMachineOpcode();
478
479 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
480 // the CopyToReg'd destination register instead of creating a new vreg.
481 for (SDNode *User : Node->uses()) {
482 if (User->getOpcode() == ISD::CopyToReg &&
483 User->getOperand(2).getNode() == Node) {
484 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
485 if (DestReg.isVirtual()) {
486 VRBase = DestReg;
487 break;
488 }
489 }
490 }
491
492 if (Opc == TargetOpcode::EXTRACT_SUBREG) {
493 // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
494 // constraints on the %dst register, COPY can target all legal register
495 // classes.
496 unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
497 const TargetRegisterClass *TRC =
498 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
499
500 Register Reg;
501 MachineInstr *DefMI;
502 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
503 if (R && Register::isPhysicalRegister(R->getReg())) {
504 Reg = R->getReg();
505 DefMI = nullptr;
506 } else {
507 Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
508 DefMI = MRI->getVRegDef(Reg);
509 }
510
511 Register SrcReg, DstReg;
512 unsigned DefSubIdx;
513 if (DefMI &&
514 TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
515 SubIdx == DefSubIdx &&
516 TRC == MRI->getRegClass(SrcReg)) {
517 // Optimize these:
518 // r1025 = s/zext r1024, 4
519 // r1026 = extract_subreg r1025, 4
520 // to a copy
521 // r1026 = copy r1024
522 VRBase = MRI->createVirtualRegister(TRC);
523 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
524 TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
525 MRI->clearKillFlags(SrcReg);
526 } else {
527 // Reg may not support a SubIdx sub-register, and we may need to
528 // constrain its register class or issue a COPY to a compatible register
529 // class.
530 if (Reg.isVirtual())
531 Reg = ConstrainForSubReg(Reg, SubIdx,
532 Node->getOperand(0).getSimpleValueType(),
533 Node->isDivergent(), Node->getDebugLoc());
534 // Create the destreg if it is missing.
535 if (!VRBase)
536 VRBase = MRI->createVirtualRegister(TRC);
537
538 // Create the extract_subreg machine instruction.
539 MachineInstrBuilder CopyMI =
540 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
541 TII->get(TargetOpcode::COPY), VRBase);
542 if (Reg.isVirtual())
543 CopyMI.addReg(Reg, 0, SubIdx);
544 else
545 CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
546 }
547 } else if (Opc == TargetOpcode::INSERT_SUBREG ||
548 Opc == TargetOpcode::SUBREG_TO_REG) {
549 SDValue N0 = Node->getOperand(0);
550 SDValue N1 = Node->getOperand(1);
551 SDValue N2 = Node->getOperand(2);
552 unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
553
554 // Figure out the register class to create for the destreg. It should be
555 // the largest legal register class supporting SubIdx sub-registers.
556 // RegisterCoalescer will constrain it further if it decides to eliminate
557 // the INSERT_SUBREG instruction.
558 //
559 // %dst = INSERT_SUBREG %src, %sub, SubIdx
560 //
561 // is lowered by TwoAddressInstructionPass to:
562 //
563 // %dst = COPY %src
564 // %dst:SubIdx = COPY %sub
565 //
566 // There is no constraint on the %src register class.
567 //
568 const TargetRegisterClass *SRC =
569 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
570 SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
571 assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");
572
573 if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
574 VRBase = MRI->createVirtualRegister(SRC);
575
576 // Create the insert_subreg or subreg_to_reg machine instruction.
577 MachineInstrBuilder MIB =
578 BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
579
580 // If creating a subreg_to_reg, then the first input operand
581 // is an implicit value immediate, otherwise it's a register
582 if (Opc == TargetOpcode::SUBREG_TO_REG) {
583 const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
584 MIB.addImm(SD->getZExtValue());
585 } else
586 AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
587 IsClone, IsCloned);
588 // Add the subregister being inserted
589 AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
590 IsClone, IsCloned);
591 MIB.addImm(SubIdx);
592 MBB->insert(InsertPos, MIB);
593 } else
594 llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
595
596 SDValue Op(Node, 0);
597 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
598 (void)isNew; // Silence compiler warning.
599 assert(isNew && "Node emitted out of order - early");
600}
601
602/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
603/// COPY_TO_REGCLASS is just a normal copy, except that the destination
604/// register is constrained to be in a particular register class.
605///
606void
607InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
608 DenseMap<SDValue, Register> &VRBaseMap) {
609 unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
610
611 // Create the new VReg in the destination class and emit a copy.
612 unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
613 const TargetRegisterClass *DstRC =
614 TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
615 Register NewVReg = MRI->createVirtualRegister(DstRC);
616 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
617 NewVReg).addReg(VReg);
618
619 SDValue Op(Node, 0);
620 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
621 (void)isNew; // Silence compiler warning.
622 assert(isNew && "Node emitted out of order - early");
623}
624
625/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
626///
627void InstrEmitter::EmitRegSequence(SDNode *Node,
628 DenseMap<SDValue, Register> &VRBaseMap,
629 bool IsClone, bool IsCloned) {
630 unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
631 const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
632 Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
633 const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
634 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
635 unsigned NumOps = Node->getNumOperands();
636 // If the input pattern has a chain, then the root of the corresponding
637 // output pattern will get a chain as well. This can happen to be a
638 // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
639 if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
640 --NumOps; // Ignore chain if it exists.
641
642 assert((NumOps & 1) == 1 &&
643 "REG_SEQUENCE must have an odd number of operands!");
644 for (unsigned i = 1; i != NumOps; ++i) {
645 SDValue Op = Node->getOperand(i);
646 if ((i & 1) == 0) {
647 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
648 // Skip physical registers as they don't have a vreg to get and we'll
649 // insert copies for them in TwoAddressInstructionPass anyway.
650 if (!R || !Register::isPhysicalRegister(R->getReg())) {
651 unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
652 unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
653 const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
654 const TargetRegisterClass *SRC =
655 TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
656 if (SRC && SRC != RC) {
657 MRI->setRegClass(NewVReg, SRC);
658 RC = SRC;
659 }
660 }
661 }
662 AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
663 IsClone, IsCloned);
664 }
665
666 MBB->insert(InsertPos, MIB);
667 SDValue Op(Node, 0);
668 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
669 (void)isNew; // Silence compiler warning.
670 assert(isNew && "Node emitted out of order - early");
671}
672
673/// EmitDbgValue - Generate machine instruction for a dbg_value node.
674///
675MachineInstr *
676InstrEmitter::EmitDbgValue(SDDbgValue *SD,
677 DenseMap<SDValue, Register> &VRBaseMap) {
678 MDNode *Var = SD->getVariable();
679 MDNode *Expr = SD->getExpression();
680 DebugLoc DL = SD->getDebugLoc();
681 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
682 "Expected inlined-at fields to agree");
683
684 SD->setIsEmitted();
685
686 ArrayRef<SDDbgOperand> LocationOps = SD->getLocationOps();
687 assert(!LocationOps.empty() && "dbg_value with no location operands?");
688
689 if (SD->isInvalidated())
690 return EmitDbgNoLocation(SD);
691
692 // Emit variadic dbg_value nodes as DBG_VALUE_LIST.
693 if (SD->isVariadic()) {
694 // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
695 const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
696 // Build the DBG_VALUE_LIST instruction base.
697 auto MIB = BuildMI(*MF, DL, DbgValDesc);
698 MIB.addMetadata(Var);
699 MIB.addMetadata(Expr);
700 AddDbgValueLocationOps(MIB, DbgValDesc, LocationOps, VRBaseMap);
701 return &*MIB;
702 }
703
704 // Attempt to produce a DBG_INSTR_REF if we've been asked to.
705 // We currently exclude the possibility of instruction references for
706 // variadic nodes; if at some point we enable them, this should be moved
707 // above the variadic block.
708 if (EmitDebugInstrRefs)
709 if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
710 return InstrRef;
711
712 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
713}
714
715void InstrEmitter::AddDbgValueLocationOps(
716 MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
717 ArrayRef<SDDbgOperand> LocationOps,
718 DenseMap<SDValue, Register> &VRBaseMap) {
719 for (const SDDbgOperand &Op : LocationOps) {
720 switch (Op.getKind()) {
721 case SDDbgOperand::FRAMEIX:
722 MIB.addFrameIndex(Op.getFrameIx());
723 break;
724 case SDDbgOperand::VREG:
725 MIB.addReg(Op.getVReg());
726 break;
727 case SDDbgOperand::SDNODE: {
728 SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
729 // It's possible we replaced this SDNode with other(s) and therefore
730 // didn't generate code for it. It's better to catch these cases where
731 // they happen and transfer the debug info, but trying to guarantee that
732 // in all cases would be very fragile; this is a safeguard for any
733 // that were missed.
734 if (VRBaseMap.count(V) == 0)
735 MIB.addReg(0U); // undef
736 else
737 AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
738 /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
739 } break;
740 case SDDbgOperand::CONST: {
741 const Value *V = Op.getConst();
742 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
743 if (CI->getBitWidth() > 64)
744 MIB.addCImm(CI);
745 else
746 MIB.addImm(CI->getSExtValue());
747 } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
748 MIB.addFPImm(CF);
749 } else if (isa<ConstantPointerNull>(V)) {
750 // Note: This assumes that all nullptr constants are zero-valued.
751 MIB.addImm(0);
752 } else {
753 // Could be an Undef. In any case insert an Undef so we can see what we
754 // dropped.
755 MIB.addReg(0U);
756 }
757 } break;
758 }
759 }
760}
761
762MachineInstr *
763InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
764 DenseMap<SDValue, Register> &VRBaseMap) {
765 assert(!SD->isVariadic());
766 SDDbgOperand DbgOperand = SD->getLocationOps()[0];
767 MDNode *Var = SD->getVariable();
768 DIExpression *Expr = (DIExpression*)SD->getExpression();
769 DebugLoc DL = SD->getDebugLoc();
770 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
771
772 // Handle variable locations that don't actually depend on the instructions
773 // in the program: constants and stack locations.
774 if (DbgOperand.getKind() == SDDbgOperand::FRAMEIX ||
775 DbgOperand.getKind() == SDDbgOperand::CONST)
776 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
777
778 // Immediately fold any indirectness from the LLVM-IR intrinsic into the
779 // expression:
780 if (SD->isIndirect()) {
781 std::vector<uint64_t> Elts = {dwarf::DW_OP_deref};
782 Expr = DIExpression::append(Expr, Elts);
783 }
784
785 // It may not be immediately possible to identify the MachineInstr that
786 // defines a VReg, it can depend for example on the order blocks are
787 // emitted in. When this happens, or when further analysis is needed later,
788 // produce an instruction like this:
789 //
790 // DBG_INSTR_REF %0:gr64, 0, !123, !456
791 //
792 // i.e., point the instruction at the vreg, and patch it up later in
793 // MachineFunction::finalizeDebugInstrRefs.
794 auto EmitHalfDoneInstrRef = [&](unsigned VReg) -> MachineInstr * {
795 auto MIB = BuildMI(*MF, DL, RefII);
796 MIB.addReg(VReg);
797 MIB.addImm(0);
798 MIB.addMetadata(Var);
799 MIB.addMetadata(Expr);
800 return MIB;
801 };
802
803 // Try to find both the defined register and the instruction defining it.
804 MachineInstr *DefMI = nullptr;
805 unsigned VReg;
806
807 if (DbgOperand.getKind() == SDDbgOperand::VREG) {
808 VReg = DbgOperand.getVReg();
809
810 // No definition means that block hasn't been emitted yet. Leave a vreg
811 // reference to be fixed later.
812 if (!MRI->hasOneDef(VReg))
813 return EmitHalfDoneInstrRef(VReg);
814
815 DefMI = &*MRI->def_instr_begin(VReg);
816 } else {
817 assert(DbgOperand.getKind() == SDDbgOperand::SDNODE);
818 // Look up the corresponding VReg for the given SDNode, if any.
819 SDNode *Node = DbgOperand.getSDNode();
820 SDValue Op = SDValue(Node, DbgOperand.getResNo());
821 DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
822 // No VReg -> produce a DBG_VALUE $noreg instead.
823 if (I==VRBaseMap.end())
824 return EmitDbgNoLocation(SD);
825
826 // Try to pick out a defining instruction at this point.
827 VReg = getVR(Op, VRBaseMap);
828
829 // Again, if there's no instruction defining the VReg right now, fix it up
830 // later.
831 if (!MRI->hasOneDef(VReg))
832 return EmitHalfDoneInstrRef(VReg);
833
834 DefMI = &*MRI->def_instr_begin(VReg);
835 }
836
837 // Avoid copy like instructions: they don't define values, only move them.
838 // Leave a virtual-register reference until it can be fixed up later, to find
839 // the underlying value definition.
840 if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI))
841 return EmitHalfDoneInstrRef(VReg);
842
843 auto MIB = BuildMI(*MF, DL, RefII);
844
845 // Find the operand number which defines the specified VReg.
846 unsigned OperandIdx = 0;
847 for (const auto &MO : DefMI->operands()) {
848 if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
849 break;
850 ++OperandIdx;
851 }
852 assert(OperandIdx < DefMI->getNumOperands());
853
854 // Make the DBG_INSTR_REF refer to that instruction, and that operand.
855 unsigned InstrNum = DefMI->getDebugInstrNum();
856 MIB.addImm(InstrNum);
857 MIB.addImm(OperandIdx);
858 MIB.addMetadata(Var);
859 MIB.addMetadata(Expr);
860 return &*MIB;
861}
862
863MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) {
864 // An invalidated SDNode must generate an undef DBG_VALUE: although the
865 // original value is no longer computed, earlier DBG_VALUEs live ranges
866 // must not leak into later code.
867 MDNode *Var = SD->getVariable();
868 MDNode *Expr = SD->getExpression();
869 DebugLoc DL = SD->getDebugLoc();
870 auto MIB = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE));
871 MIB.addReg(0U);
872 MIB.addReg(0U);
873 MIB.addMetadata(Var);
874 MIB.addMetadata(Expr);
875 return &*MIB;
876}
877
878MachineInstr *
879InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD,
880 DenseMap<SDValue, Register> &VRBaseMap) {
881 MDNode *Var = SD->getVariable();
882 DIExpression *Expr = SD->getExpression();
883 DebugLoc DL = SD->getDebugLoc();
884 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
885
886 assert(SD->getLocationOps().size() == 1 &&
887 "Non variadic dbg_value should have only one location op");
888
889 // See about constant-folding the expression.
890 // Copy the location operand in case we replace it.
891 SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]);
892 if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) {
893 const Value *V = LocationOps[0].getConst();
894 if (auto *C = dyn_cast<ConstantInt>(V)) {
895 std::tie(Expr, C) = Expr->constantFold(C);
896 LocationOps[0] = SDDbgOperand::fromConst(C);
897 }
898 }
899
900 // Emit non-variadic dbg_value nodes as DBG_VALUE.
901 // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr
902 auto MIB = BuildMI(*MF, DL, II);
903 AddDbgValueLocationOps(MIB, II, LocationOps, VRBaseMap);
904
905 if (SD->isIndirect())
906 MIB.addImm(0U);
907 else
908 MIB.addReg(0U);
909
910 return MIB.addMetadata(Var).addMetadata(Expr);
911}
912
913MachineInstr *
914InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
915 MDNode *Label = SD->getLabel();
916 DebugLoc DL = SD->getDebugLoc();
917 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
918 "Expected inlined-at fields to agree");
919
920 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
921 MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
922 MIB.addMetadata(Label);
923
924 return &*MIB;
925}
926
927/// EmitMachineNode - Generate machine code for a target-specific node and
928/// needed dependencies.
929///
930void InstrEmitter::
931EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
932 DenseMap<SDValue, Register> &VRBaseMap) {
933 unsigned Opc = Node->getMachineOpcode();
934
935 // Handle subreg insert/extract specially
936 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
937 Opc == TargetOpcode::INSERT_SUBREG ||
938 Opc == TargetOpcode::SUBREG_TO_REG) {
939 EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
940 return;
941 }
942
943 // Handle COPY_TO_REGCLASS specially.
944 if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
945 EmitCopyToRegClassNode(Node, VRBaseMap);
946 return;
947 }
948
949 // Handle REG_SEQUENCE specially.
950 if (Opc == TargetOpcode::REG_SEQUENCE) {
951 EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
952 return;
953 }
954
955 if (Opc == TargetOpcode::IMPLICIT_DEF)
956 // We want a unique VR for each IMPLICIT_DEF use.
957 return;
958
959 const MCInstrDesc &II = TII->get(Opc);
960 unsigned NumResults = CountResults(Node);
961 unsigned NumDefs = II.getNumDefs();
962 const MCPhysReg *ScratchRegs = nullptr;
963
964 // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
965 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
966 // Stackmaps do not have arguments and do not preserve their calling
967 // convention. However, to simplify runtime support, they clobber the same
968 // scratch registers as AnyRegCC.
969 unsigned CC = CallingConv::AnyReg;
970 if (Opc == TargetOpcode::PATCHPOINT) {
971 CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
972 NumDefs = NumResults;
973 }
974 ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
975 } else if (Opc == TargetOpcode::STATEPOINT) {
976 NumDefs = NumResults;
977 }
978
979 unsigned NumImpUses = 0;
980 unsigned NodeOperands =
981 countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
982 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
983 II.isVariadic() && II.variadicOpsAreDefs();
984 bool HasPhysRegOuts = NumResults > NumDefs &&
985 II.getImplicitDefs() != nullptr && !HasVRegVariadicDefs;
986#ifndef NDEBUG
987 unsigned NumMIOperands = NodeOperands + NumResults;
988 if (II.isVariadic())
989 assert(NumMIOperands >= II.getNumOperands() &&
990 "Too few operands for a variadic node!");
991 else
992 assert(NumMIOperands >= II.getNumOperands() &&
993 NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
994 NumImpUses &&
995 "#operands for dag node doesn't match .td file!");
996#endif
997
998 // Create the new machine instruction.
999 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);
1000
1001 // Add result register values for things that are defined by this
1002 // instruction.
1003 if (NumResults) {
1004 CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);
1005
1006 // Transfer any IR flags from the SDNode to the MachineInstr
1007 MachineInstr *MI = MIB.getInstr();
1008 const SDNodeFlags Flags = Node->getFlags();
1009 if (Flags.hasNoSignedZeros())
1010 MI->setFlag(MachineInstr::MIFlag::FmNsz);
1011
1012 if (Flags.hasAllowReciprocal())
1013 MI->setFlag(MachineInstr::MIFlag::FmArcp);
1014
1015 if (Flags.hasNoNaNs())
1016 MI->setFlag(MachineInstr::MIFlag::FmNoNans);
1017
1018 if (Flags.hasNoInfs())
1019 MI->setFlag(MachineInstr::MIFlag::FmNoInfs);
1020
1021 if (Flags.hasAllowContract())
1022 MI->setFlag(MachineInstr::MIFlag::FmContract);
1023
1024 if (Flags.hasApproximateFuncs())
1025 MI->setFlag(MachineInstr::MIFlag::FmAfn);
1026
1027 if (Flags.hasAllowReassociation())
1028 MI->setFlag(MachineInstr::MIFlag::FmReassoc);
1029
1030 if (Flags.hasNoUnsignedWrap())
1031 MI->setFlag(MachineInstr::MIFlag::NoUWrap);
1032
1033 if (Flags.hasNoSignedWrap())
1034 MI->setFlag(MachineInstr::MIFlag::NoSWrap);
1035
1036 if (Flags.hasExact())
1037 MI->setFlag(MachineInstr::MIFlag::IsExact);
1038
1039 if (Flags.hasNoFPExcept())
1040 MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
1041 }
1042
1043 // Emit all of the actual operands of this instruction, adding them to the
1044 // instruction as appropriate.
1045 bool HasOptPRefs = NumDefs > NumResults;
1046  assert((!HasOptPRefs || !HasPhysRegOuts) &&
1047         "Unable to cope with optional defs and phys regs defs!");
1048 unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
1049 for (unsigned i = NumSkip; i != NodeOperands; ++i)
1050 AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
1051 VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
1052
1053 // Add scratch registers as implicit def and early clobber
1054 if (ScratchRegs)
1055 for (unsigned i = 0; ScratchRegs[i]; ++i)
1056 MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
1057 RegState::EarlyClobber);
1058
1059 // Set the memory reference descriptions of this instruction now that it is
1060 // part of the function.
1061 MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
1062
1063 // Insert the instruction into position in the block. This needs to
1064 // happen before any custom inserter hook is called so that the
1065 // hook knows where in the block to insert the replacement code.
1066 MBB->insert(InsertPos, MIB);
1067
1068 // The MachineInstr may also define physregs instead of virtregs. These
1069 // physreg values can reach other instructions in different ways:
1070 //
1071 // 1. When there is a use of a Node value beyond the explicitly defined
1072 // virtual registers, we emit a CopyFromReg for one of the implicitly
1073 // defined physregs. This only happens when HasPhysRegOuts is true.
1074 //
1075 // 2. A CopyFromReg reading a physreg may be glued to this instruction.
1076 //
1077 // 3. A glued instruction may implicitly use a physreg.
1078 //
1079 // 4. A glued instruction may use a RegisterSDNode operand.
1080 //
1081 // Collect all the used physreg defs, and make sure that any unused physreg
1082 // defs are marked as dead.
1083 SmallVector<Register, 8> UsedRegs;
1084
1085 // Additional results must be physical register defs.
1086 if (HasPhysRegOuts) {
1087 for (unsigned i = NumDefs; i < NumResults; ++i) {
1088 Register Reg = II.getImplicitDefs()[i - NumDefs];
1089 if (!Node->hasAnyUseOfValue(i))
1090 continue;
1091 // This implicitly defined physreg has a use.
1092 UsedRegs.push_back(Reg);
1093 EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
1094 }
1095 }
1096
1097 // Scan the glue chain for any used physregs.
1098 if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
1099 for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
1100 if (F->getOpcode() == ISD::CopyFromReg) {
1101 UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
1102 continue;
1103 } else if (F->getOpcode() == ISD::CopyToReg) {
1104 // Skip CopyToReg nodes that are internal to the glue chain.
1105 continue;
1106 }
1107 // Collect declared implicit uses.
1108 const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
1109 UsedRegs.append(MCID.getImplicitUses(),
1110 MCID.getImplicitUses() + MCID.getNumImplicitUses());
1111 // In addition to declared implicit uses, we must also check for
1112 // direct RegisterSDNode operands.
1113 for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
1114 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
1115 Register Reg = R->getReg();
1116 if (Reg.isPhysical())
1117 UsedRegs.push_back(Reg);
1118 }
1119 }
1120 }
1121
1122 // Finally mark unused registers as dead.
1123 if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
1124 MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
1125
1126  // STATEPOINT is too 'dynamic' to have a meaningful machine description.
1127 // We have to manually tie operands.
1128 if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
1129    assert(!HasPhysRegOuts && "STATEPOINT mishandled");
1130 MachineInstr *MI = MIB;
1131 unsigned Def = 0;
1132 int First = StatepointOpers(MI).getFirstGCPtrIdx();
1133    assert(First > 0 && "Statepoint has Defs but no GC ptr list");
1134 unsigned Use = (unsigned)First;
1135 while (Def < NumDefs) {
1136 if (MI->getOperand(Use).isReg())
1137 MI->tieOperands(Def++, Use);
1138 Use = StackMaps::getNextMetaArgIdx(MI, Use);
1139 }
1140 }
1141
1142 // Run post-isel target hook to adjust this instruction if needed.
1143 if (II.hasPostISelHook())
1144 TLI->AdjustInstrPostInstrSelection(*MIB, Node);
1145}
1146
1147/// EmitSpecialNode - Generate machine code for a target-independent node and
1148/// needed dependencies.
1149void InstrEmitter::
1150EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
1151 DenseMap<SDValue, Register> &VRBaseMap) {
1152 switch (Node->getOpcode()) {
1. Control jumps to 'case CopyToReg:' at line 1163
1153 default:
1154#ifndef NDEBUG
1155 Node->dump();
1156#endif
1157 llvm_unreachable("This target-independent node should have been selected!")::llvm::llvm_unreachable_internal("This target-independent node should have been selected!"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 1157)
;
1158 case ISD::EntryToken:
1159 llvm_unreachable("EntryToken should have been excluded from the schedule!")::llvm::llvm_unreachable_internal("EntryToken should have been excluded from the schedule!"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 1159)
;
1160 case ISD::MERGE_VALUES:
1161 case ISD::TokenFactor: // fall thru
1162 break;
1163 case ISD::CopyToReg: {
1164 Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1165 SDValue SrcVal = Node->getOperand(2);
1166 if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
1167 SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
1168      // Instead of building a COPY to that vreg destination, build an
1169      // IMPLICIT_DEF instruction.
1170 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1171 TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
1172 break;
1173 }
1174 Register SrcReg;
1175 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
2. Assuming 'R' is null
3. Taking false branch
1176 SrcReg = R->getReg();
1177 else
1178 SrcReg = getVR(SrcVal, VRBaseMap);
4. The value of 'SrcVal' is assigned to 'Op.Node'
5. Calling 'InstrEmitter::getVR'
1179
1180 if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
1181 break;
1182
1183 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
1184 DestReg).addReg(SrcReg);
1185 break;
1186 }
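The trace steps 2-5 above are the path the analyzer follows into the flagged header code: 'R' is assumed null, SrcVal is then handed to getVR, and the SDValue's underlying node pointer is what later gets dereferenced. A minimal defensive sketch of this call site, purely illustrative and not the upstream fix, would guard that pointer first; in a well-formed DAG the operand's node is never null, which is the part the analyzer cannot prove here:

    // Illustrative sketch only (assumes the surrounding EmitSpecialNode scope).
    Register SrcReg;
    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
      SrcReg = R->getReg();
    else if (SrcVal.getNode())               // guard the pointer getVR will use
      SrcReg = getVR(SrcVal, VRBaseMap);
    else
      break;                                 // nothing to copy from; skip the COPY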
1187 case ISD::CopyFromReg: {
1188 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1189 EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
1190 break;
1191 }
1192 case ISD::EH_LABEL:
1193 case ISD::ANNOTATION_LABEL: {
1194 unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
1195 ? TargetOpcode::EH_LABEL
1196 : TargetOpcode::ANNOTATION_LABEL;
1197 MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
1198 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1199 TII->get(Opc)).addSym(S);
1200 break;
1201 }
1202
1203 case ISD::LIFETIME_START:
1204 case ISD::LIFETIME_END: {
1205 unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
1206 ? TargetOpcode::LIFETIME_START
1207 : TargetOpcode::LIFETIME_END;
1208 auto *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
1209 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1210 .addFrameIndex(FI->getIndex());
1211 break;
1212 }
1213
1214 case ISD::PSEUDO_PROBE: {
1215 unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
1216 auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
1217 auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
1218 auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();
1219
1220 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1221 .addImm(Guid)
1222 .addImm(Index)
1223 .addImm((uint8_t)PseudoProbeType::Block)
1224 .addImm(Attr);
1225 break;
1226 }
1227
1228 case ISD::INLINEASM:
1229 case ISD::INLINEASM_BR: {
1230 unsigned NumOps = Node->getNumOperands();
1231 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1232 --NumOps; // Ignore the glue operand.
1233
1234 // Create the inline asm machine instruction.
1235 unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
1236 ? TargetOpcode::INLINEASM_BR
1237 : TargetOpcode::INLINEASM;
1238 MachineInstrBuilder MIB =
1239 BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));
1240
1241 // Add the asm string as an external symbol operand.
1242 SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
1243 const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
1244 MIB.addExternalSymbol(AsmStr);
1245
1246 // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
1247 // bits.
1248 int64_t ExtraInfo =
1249 cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
1250 getZExtValue();
1251 MIB.addImm(ExtraInfo);
1252
1253    // Remember the operand index of the group flags.
1254 SmallVector<unsigned, 8> GroupIdx;
1255
1256 // Remember registers that are part of early-clobber defs.
1257 SmallVector<unsigned, 8> ECRegs;
1258
1259 // Add all of the operand registers to the instruction.
1260 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1261 unsigned Flags =
1262 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
1263 const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
1264
1265 GroupIdx.push_back(MIB->getNumOperands());
1266 MIB.addImm(Flags);
1267 ++i; // Skip the ID value.
1268
1269 switch (InlineAsm::getKind(Flags)) {
1270      default: llvm_unreachable("Bad flags!");
1271 case InlineAsm::Kind_RegDef:
1272 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1273 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1274 // FIXME: Add dead flags for physical and virtual registers defined.
1275 // For now, mark physical register defs as implicit to help fast
1276 // regalloc. This makes inline asm look a lot like calls.
1277 MIB.addReg(Reg,
1278 RegState::Define |
1279 getImplRegState(Register::isPhysicalRegister(Reg)));
1280 }
1281 break;
1282 case InlineAsm::Kind_RegDefEarlyClobber:
1283 case InlineAsm::Kind_Clobber:
1284 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1285 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1286 MIB.addReg(Reg,
1287 RegState::Define | RegState::EarlyClobber |
1288 getImplRegState(Register::isPhysicalRegister(Reg)));
1289 ECRegs.push_back(Reg);
1290 }
1291 break;
1292 case InlineAsm::Kind_RegUse: // Use of register.
1293 case InlineAsm::Kind_Imm: // Immediate.
1294 case InlineAsm::Kind_Mem: // Addressing mode.
1295 // The addressing mode has been selected, just add all of the
1296 // operands to the machine instruction.
1297 for (unsigned j = 0; j != NumVals; ++j, ++i)
1298 AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
1299 /*IsDebug=*/false, IsClone, IsCloned);
1300
1301 // Manually set isTied bits.
1302 if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
1303 unsigned DefGroup = 0;
1304 if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
1305 unsigned DefIdx = GroupIdx[DefGroup] + 1;
1306 unsigned UseIdx = GroupIdx.back() + 1;
1307 for (unsigned j = 0; j != NumVals; ++j)
1308 MIB->tieOperands(DefIdx + j, UseIdx + j);
1309 }
1310 }
1311 break;
1312 }
1313 }
1314
1315 // GCC inline assembly allows input operands to also be early-clobber
1316 // output operands (so long as the operand is written only after it's
1317 // used), but this does not match the semantics of our early-clobber flag.
1318 // If an early-clobber operand register is also an input operand register,
1319 // then remove the early-clobber flag.
1320 for (unsigned Reg : ECRegs) {
1321 if (MIB->readsRegister(Reg, TRI)) {
1322 MachineOperand *MO =
1323 MIB->findRegisterDefOperand(Reg, false, false, TRI);
1324        assert(MO && "No def operand for clobbered register?");
1325 MO->setIsEarlyClobber(false);
1326 }
1327 }
1328
1329 // Get the mdnode from the asm if it exists and add it to the instruction.
1330 SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
1331 const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
1332 if (MD)
1333 MIB.addMetadata(MD);
1334
1335 MBB->insert(InsertPos, MIB);
1336 break;
1337 }
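For orientation, a sketch (paraphrased from the handling above, not quoted from the source) of the operand layout the INLINEASM/INLINEASM_BR loop decodes:

    // Operand 0                                : input chain
    // Operand InlineAsm::Op_AsmString          : ExternalSymbolSDNode holding the asm text
    // Operand InlineAsm::Op_MDNode             : srcloc metadata wrapper (may be null)
    // Operand InlineAsm::Op_ExtraInfo          : ConstantSDNode with side-effect/align-stack/dialect bits
    // Operand InlineAsm::Op_FirstOperand ...   : repeated groups, each a flag word
    //   (decoded with InlineAsm::getKind / getNumOperandRegisters) followed by that
    //   many register, immediate, or memory operands; a trailing glue operand, if
    //   present, is stripped before the loop runs.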
1338 }
1339}
1340
1341/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
1342/// at the given position in the given block.
1343InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
1344 MachineBasicBlock::iterator insertpos)
1345 : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
1346 TII(MF->getSubtarget().getInstrInfo()),
1347 TRI(MF->getSubtarget().getRegisterInfo()),
1348 TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
1349 InsertPos(insertpos) {
1350 EmitDebugInstrRefs = MF->useDebugInstrRef();
1351}

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61class GlobalValue;
62class MachineBasicBlock;
63class MachineConstantPoolValue;
64class MCSymbol;
65class raw_ostream;
66class SDNode;
67class SelectionDAG;
68class Type;
69class Value;
70
71void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
72 bool force = false);
73
74/// This represents a list of ValueType's that has been intern'd by
75/// a SelectionDAG. Instances of this simple value class are returned by
76/// SelectionDAG::getVTList(...).
77///
78struct SDVTList {
79 const EVT *VTs;
80 unsigned int NumVTs;
81};
82
83namespace ISD {
84
85 /// Node predicates
86
87/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
88/// same constant or undefined, return true and return the constant value in
89/// \p SplatValue.
90bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
91
92/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
93/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
94/// true, it only checks BUILD_VECTOR.
95bool isConstantSplatVectorAllOnes(const SDNode *N,
96 bool BuildVectorOnly = false);
97
98/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
99/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
100/// only checks BUILD_VECTOR.
101bool isConstantSplatVectorAllZeros(const SDNode *N,
102 bool BuildVectorOnly = false);
103
104/// Return true if the specified node is a BUILD_VECTOR where all of the
105/// elements are ~0 or undef.
106bool isBuildVectorAllOnes(const SDNode *N);
107
108/// Return true if the specified node is a BUILD_VECTOR where all of the
109/// elements are 0 or undef.
110bool isBuildVectorAllZeros(const SDNode *N);
111
112/// Return true if the specified node is a BUILD_VECTOR node of all
113/// ConstantSDNode or undef.
114bool isBuildVectorOfConstantSDNodes(const SDNode *N);
115
116/// Return true if the specified node is a BUILD_VECTOR node of all
117/// ConstantFPSDNode or undef.
118bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
119
120/// Return true if the node has at least one operand and all operands of the
121/// specified node are ISD::UNDEF.
122bool allOperandsUndef(const SDNode *N);
123
124} // end namespace ISD
125
126//===----------------------------------------------------------------------===//
127/// Unlike LLVM values, Selection DAG nodes may return multiple
128/// values as the result of a computation. Many nodes return multiple values,
129/// from loads (which define a token and a return value) to ADDC (which returns
130/// a result and a carry value), to calls (which may return an arbitrary number
131/// of values).
132///
133/// As such, each use of a SelectionDAG computation must indicate the node that
134/// computes it as well as which return value to use from that node. This pair
135/// of information is represented with the SDValue value type.
136///
137class SDValue {
138 friend struct DenseMapInfo<SDValue>;
139
140 SDNode *Node = nullptr; // The node defining the value we are using.
141 unsigned ResNo = 0; // Which return value of the node we are using.
142
143public:
144 SDValue() = default;
145 SDValue(SDNode *node, unsigned resno);
146
147 /// get the index which selects a specific result in the SDNode
148 unsigned getResNo() const { return ResNo; }
149
150 /// get the SDNode which holds the desired result
151 SDNode *getNode() const { return Node; }
152
153 /// set the SDNode
154 void setNode(SDNode *N) { Node = N; }
155
156 inline SDNode *operator->() const { return Node; }
157
158 bool operator==(const SDValue &O) const {
159 return Node == O.Node && ResNo == O.ResNo;
160 }
161 bool operator!=(const SDValue &O) const {
162 return !operator==(O);
163 }
164 bool operator<(const SDValue &O) const {
165 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
166 }
167 explicit operator bool() const {
168 return Node != nullptr;
169 }
170
171 SDValue getValue(unsigned R) const {
172 return SDValue(Node, R);
173 }
174
175 /// Return true if this node is an operand of N.
176 bool isOperandOf(const SDNode *N) const;
177
178 /// Return the ValueType of the referenced return value.
179 inline EVT getValueType() const;
180
181 /// Return the simple ValueType of the referenced return value.
182 MVT getSimpleValueType() const {
183 return getValueType().getSimpleVT();
184 }
185
186 /// Returns the size of the value in bits.
187 ///
188 /// If the value type is a scalable vector type, the scalable property will
189 /// be set and the runtime size will be a positive integer multiple of the
190 /// base size.
191 TypeSize getValueSizeInBits() const {
192 return getValueType().getSizeInBits();
193 }
194
195 uint64_t getScalarValueSizeInBits() const {
196 return getValueType().getScalarType().getFixedSizeInBits();
197 }
198
199 // Forwarding methods - These forward to the corresponding methods in SDNode.
200 inline unsigned getOpcode() const;
201 inline unsigned getNumOperands() const;
202 inline const SDValue &getOperand(unsigned i) const;
203 inline uint64_t getConstantOperandVal(unsigned i) const;
204 inline const APInt &getConstantOperandAPInt(unsigned i) const;
205 inline bool isTargetMemoryOpcode() const;
206 inline bool isTargetOpcode() const;
207 inline bool isMachineOpcode() const;
208 inline bool isUndef() const;
209 inline unsigned getMachineOpcode() const;
210 inline const DebugLoc &getDebugLoc() const;
211 inline void dump() const;
212 inline void dump(const SelectionDAG *G) const;
213 inline void dumpr() const;
214 inline void dumpr(const SelectionDAG *G) const;
215
216 /// Return true if this operand (which must be a chain) reaches the
217 /// specified operand without crossing any side-effecting instructions.
218 /// In practice, this looks through token factors and non-volatile loads.
219 /// In order to remain efficient, this only
220 /// looks a couple of nodes in; it does not do an exhaustive search.
221 bool reachesChainWithoutSideEffects(SDValue Dest,
222 unsigned Depth = 2) const;
223
224 /// Return true if there are no nodes using value ResNo of Node.
225 inline bool use_empty() const;
226
227 /// Return true if there is exactly one node using value ResNo of Node.
228 inline bool hasOneUse() const;
229};
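A short illustrative use of the node/result-number pairing described above; LoadN is a hypothetical SDNode* whose results are {loaded value, output chain}:

  SDValue Val(LoadN, 0);            // result 0 of the node: the loaded value
  SDValue Chain = Val.getValue(1);  // result 1 of the same node: the chain
  assert(Val.getNode() == Chain.getNode() && Val.getResNo() != Chain.getResNo());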
230
231template<> struct DenseMapInfo<SDValue> {
232 static inline SDValue getEmptyKey() {
233 SDValue V;
234 V.ResNo = -1U;
235 return V;
236 }
237
238 static inline SDValue getTombstoneKey() {
239 SDValue V;
240 V.ResNo = -2U;
241 return V;
242 }
243
244 static unsigned getHashValue(const SDValue &Val) {
245 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
246 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
247 }
248
249 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
250 return LHS == RHS;
251 }
252};
253
254/// Allow casting operators to work directly on
255/// SDValues as if they were SDNode*'s.
256template<> struct simplify_type<SDValue> {
257 using SimpleType = SDNode *;
258
259 static SimpleType getSimplifiedValue(SDValue &Val) {
260 return Val.getNode();
261 }
262};
263template<> struct simplify_type<const SDValue> {
264 using SimpleType = /*const*/ SDNode *;
265
266 static SimpleType getSimplifiedValue(const SDValue &Val) {
267 return Val.getNode();
268 }
269};
270
271/// Represents a use of a SDNode. This class holds an SDValue,
272/// which records the SDNode being used and the result number, a
273/// pointer to the SDNode using the value, and Next and Prev pointers,
274/// which link together all the uses of an SDNode.
275///
276class SDUse {
277 /// Val - The value being used.
278 SDValue Val;
279 /// User - The user of this value.
280 SDNode *User = nullptr;
281 /// Prev, Next - Pointers to the uses list of the SDNode referred by
282 /// this operand.
283 SDUse **Prev = nullptr;
284 SDUse *Next = nullptr;
285
286public:
287 SDUse() = default;
288 SDUse(const SDUse &U) = delete;
289 SDUse &operator=(const SDUse &) = delete;
290
291 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
292 operator const SDValue&() const { return Val; }
293
294 /// If implicit conversion to SDValue doesn't work, the get() method returns
295 /// the SDValue.
296 const SDValue &get() const { return Val; }
297
298 /// This returns the SDNode that contains this Use.
299 SDNode *getUser() { return User; }
300
301 /// Get the next SDUse in the use list.
302 SDUse *getNext() const { return Next; }
303
304 /// Convenience function for get().getNode().
305 SDNode *getNode() const { return Val.getNode(); }
306 /// Convenience function for get().getResNo().
307 unsigned getResNo() const { return Val.getResNo(); }
308 /// Convenience function for get().getValueType().
309 EVT getValueType() const { return Val.getValueType(); }
310
311 /// Convenience function for get().operator==
312 bool operator==(const SDValue &V) const {
313 return Val == V;
314 }
315
316 /// Convenience function for get().operator!=
317 bool operator!=(const SDValue &V) const {
318 return Val != V;
319 }
320
321 /// Convenience function for get().operator<
322 bool operator<(const SDValue &V) const {
323 return Val < V;
324 }
325
326private:
327 friend class SelectionDAG;
328 friend class SDNode;
329 // TODO: unfriend HandleSDNode once we fix its operand handling.
330 friend class HandleSDNode;
331
332 void setUser(SDNode *p) { User = p; }
333
334 /// Remove this use from its existing use list, assign it the
335 /// given value, and add it to the new value's node's use list.
336 inline void set(const SDValue &V);
337 /// Like set, but only supports initializing a newly-allocated
338 /// SDUse with a non-null value.
339 inline void setInitial(const SDValue &V);
340 /// Like set, but only sets the Node portion of the value,
341 /// leaving the ResNo portion unmodified.
342 inline void setNode(SDNode *N);
343
344 void addToList(SDUse **List) {
345 Next = *List;
346 if (Next) Next->Prev = &Next;
347 Prev = List;
348 *List = this;
349 }
350
351 void removeFromList() {
352 *Prev = Next;
353 if (Next) Next->Prev = Prev;
354 }
355};
356
357/// simplify_type specializations - Allow casting operators to work directly on
358/// SDValues as if they were SDNode*'s.
359template<> struct simplify_type<SDUse> {
360 using SimpleType = SDNode *;
361
362 static SimpleType getSimplifiedValue(SDUse &Val) {
363 return Val.getNode();
364 }
365};
366
367/// These are IR-level optimization flags that may be propagated to SDNodes.
368/// TODO: This data structure should be shared by the IR optimizer and the
369/// backend.
370struct SDNodeFlags {
371private:
372 bool NoUnsignedWrap : 1;
373 bool NoSignedWrap : 1;
374 bool Exact : 1;
375 bool NoNaNs : 1;
376 bool NoInfs : 1;
377 bool NoSignedZeros : 1;
378 bool AllowReciprocal : 1;
379 bool AllowContract : 1;
380 bool ApproximateFuncs : 1;
381 bool AllowReassociation : 1;
382
383 // We assume instructions do not raise floating-point exceptions by default,
384 // and only those marked explicitly may do so. We could choose to represent
385 // this via a positive "FPExcept" flags like on the MI level, but having a
386 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
387 // intersection logic more straightforward.
388 bool NoFPExcept : 1;
389
390public:
391 /// Default constructor turns off all optimization flags.
392 SDNodeFlags()
393 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
394 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
395 AllowContract(false), ApproximateFuncs(false),
396 AllowReassociation(false), NoFPExcept(false) {}
397
398 /// Propagate the fast-math-flags from an IR FPMathOperator.
399 void copyFMF(const FPMathOperator &FPMO) {
400 setNoNaNs(FPMO.hasNoNaNs());
401 setNoInfs(FPMO.hasNoInfs());
402 setNoSignedZeros(FPMO.hasNoSignedZeros());
403 setAllowReciprocal(FPMO.hasAllowReciprocal());
404 setAllowContract(FPMO.hasAllowContract());
405 setApproximateFuncs(FPMO.hasApproxFunc());
406 setAllowReassociation(FPMO.hasAllowReassoc());
407 }
408
409 // These are mutators for each flag.
410 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
411 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
412 void setExact(bool b) { Exact = b; }
413 void setNoNaNs(bool b) { NoNaNs = b; }
414 void setNoInfs(bool b) { NoInfs = b; }
415 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
416 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
417 void setAllowContract(bool b) { AllowContract = b; }
418 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
419 void setAllowReassociation(bool b) { AllowReassociation = b; }
420 void setNoFPExcept(bool b) { NoFPExcept = b; }
421
422 // These are accessors for each flag.
423 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
424 bool hasNoSignedWrap() const { return NoSignedWrap; }
425 bool hasExact() const { return Exact; }
426 bool hasNoNaNs() const { return NoNaNs; }
427 bool hasNoInfs() const { return NoInfs; }
428 bool hasNoSignedZeros() const { return NoSignedZeros; }
429 bool hasAllowReciprocal() const { return AllowReciprocal; }
430 bool hasAllowContract() const { return AllowContract; }
431 bool hasApproximateFuncs() const { return ApproximateFuncs; }
432 bool hasAllowReassociation() const { return AllowReassociation; }
433 bool hasNoFPExcept() const { return NoFPExcept; }
434
435 /// Clear any flags in this flag set that aren't also set in Flags. All
436 /// flags will be cleared if Flags are undefined.
437 void intersectWith(const SDNodeFlags Flags) {
438 NoUnsignedWrap &= Flags.NoUnsignedWrap;
439 NoSignedWrap &= Flags.NoSignedWrap;
440 Exact &= Flags.Exact;
441 NoNaNs &= Flags.NoNaNs;
442 NoInfs &= Flags.NoInfs;
443 NoSignedZeros &= Flags.NoSignedZeros;
444 AllowReciprocal &= Flags.AllowReciprocal;
445 AllowContract &= Flags.AllowContract;
446 ApproximateFuncs &= Flags.ApproximateFuncs;
447 AllowReassociation &= Flags.AllowReassociation;
448 NoFPExcept &= Flags.NoFPExcept;
449 }
450};
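A small usage sketch of the flag propagation above; I is a hypothetical IR instruction assumed to be a floating-point operation, so the FPMathOperator cast is expected to succeed:

  SDNodeFlags Flags;
  if (const auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);        // pull nnan/ninf/nsz/arcp/contract/afn/reassoc
  Flags.setNoFPExcept(true);     // separately managed flag, set explicitly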
451
452/// Represents one node in the SelectionDAG.
453///
454class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
455private:
456 /// The operation that this node performs.
457 int16_t NodeType;
458
459protected:
460 // We define a set of mini-helper classes to help us interpret the bits in our
461 // SubclassData. These are designed to fit within a uint16_t so they pack
462 // with NodeType.
463
464#if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__))
465// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
466// and give the `pack` pragma push semantics.
467#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
468#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
469#else
470#define BEGIN_TWO_BYTE_PACK()
471#define END_TWO_BYTE_PACK()
472#endif
473
474BEGIN_TWO_BYTE_PACK()
475 class SDNodeBitfields {
476 friend class SDNode;
477 friend class MemIntrinsicSDNode;
478 friend class MemSDNode;
479 friend class SelectionDAG;
480
481 uint16_t HasDebugValue : 1;
482 uint16_t IsMemIntrinsic : 1;
483 uint16_t IsDivergent : 1;
484 };
485 enum { NumSDNodeBits = 3 };
486
487 class ConstantSDNodeBitfields {
488 friend class ConstantSDNode;
489
490 uint16_t : NumSDNodeBits;
491
492 uint16_t IsOpaque : 1;
493 };
494
495 class MemSDNodeBitfields {
496 friend class MemSDNode;
497 friend class MemIntrinsicSDNode;
498 friend class AtomicSDNode;
499
500 uint16_t : NumSDNodeBits;
501
502 uint16_t IsVolatile : 1;
503 uint16_t IsNonTemporal : 1;
504 uint16_t IsDereferenceable : 1;
505 uint16_t IsInvariant : 1;
506 };
507 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
508
509 class LSBaseSDNodeBitfields {
510 friend class LSBaseSDNode;
511 friend class VPLoadStoreSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514 friend class VPGatherScatterSDNode;
515
516 uint16_t : NumMemSDNodeBits;
517
518 // This storage is shared between disparate class hierarchies to hold an
519 // enumeration specific to the class hierarchy in use.
520 // LSBaseSDNode => enum ISD::MemIndexedMode
521 // VPLoadStoreBaseSDNode => enum ISD::MemIndexedMode
522 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
523 // VPGatherScatterSDNode => enum ISD::MemIndexType
524 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
525 uint16_t AddressingMode : 3;
526 };
527 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
528
529 class LoadSDNodeBitfields {
530 friend class LoadSDNode;
531 friend class VPLoadSDNode;
532 friend class MaskedLoadSDNode;
533 friend class MaskedGatherSDNode;
534 friend class VPGatherSDNode;
535
536 uint16_t : NumLSBaseSDNodeBits;
537
538 uint16_t ExtTy : 2; // enum ISD::LoadExtType
539 uint16_t IsExpanding : 1;
540 };
541
542 class StoreSDNodeBitfields {
543 friend class StoreSDNode;
544 friend class VPStoreSDNode;
545 friend class MaskedStoreSDNode;
546 friend class MaskedScatterSDNode;
547 friend class VPScatterSDNode;
548
549 uint16_t : NumLSBaseSDNodeBits;
550
551 uint16_t IsTruncating : 1;
552 uint16_t IsCompressing : 1;
553 };
554
555 union {
556 char RawSDNodeBits[sizeof(uint16_t)];
557 SDNodeBitfields SDNodeBits;
558 ConstantSDNodeBitfields ConstantSDNodeBits;
559 MemSDNodeBitfields MemSDNodeBits;
560 LSBaseSDNodeBitfields LSBaseSDNodeBits;
561 LoadSDNodeBitfields LoadSDNodeBits;
562 StoreSDNodeBitfields StoreSDNodeBits;
563 };
564END_TWO_BYTE_PACK()
565#undef BEGIN_TWO_BYTE_PACK
566#undef END_TWO_BYTE_PACK
567
568 // RawSDNodeBits must cover the entirety of the union. This means that all of
569 // the union's members must have size <= RawSDNodeBits. We write the RHS as
570 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
571 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
572 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
573 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
574 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
575 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
576 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
577
578private:
579 friend class SelectionDAG;
580 // TODO: unfriend HandleSDNode once we fix its operand handling.
581 friend class HandleSDNode;
582
583 /// Unique id per SDNode in the DAG.
584 int NodeId = -1;
585
586 /// The values that are used by this operation.
587 SDUse *OperandList = nullptr;
588
589 /// The types of the values this node defines. SDNode's may
590 /// define multiple values simultaneously.
591 const EVT *ValueList;
592
593 /// List of uses for this SDNode.
594 SDUse *UseList = nullptr;
595
596 /// The number of entries in the Operand/Value list.
597 unsigned short NumOperands = 0;
598 unsigned short NumValues;
599
600 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
601 // original LLVM instructions.
602 // This is used for turning off scheduling, because we'll forgo
603 // the normal scheduling algorithms and output the instructions according to
604 // this ordering.
605 unsigned IROrder;
606
607 /// Source line information.
608 DebugLoc debugLoc;
609
610 /// Return a pointer to the specified value type.
611 static const EVT *getValueTypeList(EVT VT);
612
613 SDNodeFlags Flags;
614
615public:
616 /// Unique and persistent id per SDNode in the DAG.
617 /// Used for debug printing.
618 uint16_t PersistentId;
619
620 //===--------------------------------------------------------------------===//
621 // Accessors
622 //
623
624 /// Return the SelectionDAG opcode value for this node. For
625 /// pre-isel nodes (those for which isMachineOpcode returns false), these
626 /// are the opcode values in the ISD and <target>ISD namespaces. For
627 /// post-isel opcodes, see getMachineOpcode.
628 unsigned getOpcode() const { return (unsigned short)NodeType; }
629
630 /// Test if this node has a target-specific opcode (in the
631 /// \<target\>ISD namespace).
632 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
633
634 /// Test if this node has a target-specific opcode that may raise
635 /// FP exceptions (in the \<target\>ISD namespace and greater than
636 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
637 /// opcode are currently automatically considered to possibly raise
638 /// FP exceptions as well.
639 bool isTargetStrictFPOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
641 }
642
643 /// Test if this node has a target-specific
644 /// memory-referencing opcode (in the \<target\>ISD namespace and
645 /// greater than FIRST_TARGET_MEMORY_OPCODE).
646 bool isTargetMemoryOpcode() const {
647 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
648 }
649
650  /// Return true if this node is an ISD::UNDEF node.
651 bool isUndef() const { return NodeType == ISD::UNDEF; }
652
653 /// Test if this node is a memory intrinsic (with valid pointer information).
654 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
655 /// non-memory intrinsics (with chains) that are not really instances of
656 /// MemSDNode. For such nodes, we need some extra state to determine the
657 /// proper classof relationship.
658 bool isMemIntrinsic() const {
659 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
660 NodeType == ISD::INTRINSIC_VOID) &&
661 SDNodeBits.IsMemIntrinsic;
662 }
663
664 /// Test if this node is a strict floating point pseudo-op.
665 bool isStrictFPOpcode() {
666 switch (NodeType) {
667 default:
668 return false;
669 case ISD::STRICT_FP16_TO_FP:
670 case ISD::STRICT_FP_TO_FP16:
671#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
672 case ISD::STRICT_##DAGN:
673#include "llvm/IR/ConstrainedOps.def"
674 return true;
675 }
676 }
677
678 /// Test if this node is a vector predication operation.
679 bool isVPOpcode() const { return ISD::isVPOpcode(getOpcode()); }
680
681 /// Test if this node has a post-isel opcode, directly
682 /// corresponding to a MachineInstr opcode.
683 bool isMachineOpcode() const { return NodeType < 0; }
684
685 /// This may only be called if isMachineOpcode returns
686 /// true. It returns the MachineInstr opcode value that the node's opcode
687 /// corresponds to.
688 unsigned getMachineOpcode() const {
689    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
690 return ~NodeType;
691 }
692
693 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
694 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
695
696 bool isDivergent() const { return SDNodeBits.IsDivergent; }
697
698 /// Return true if there are no uses of this node.
699 bool use_empty() const { return UseList == nullptr; }
700
701 /// Return true if there is exactly one use of this node.
702 bool hasOneUse() const { return hasSingleElement(uses()); }
703
704 /// Return the number of uses of this node. This method takes
705 /// time proportional to the number of uses.
706 size_t use_size() const { return std::distance(use_begin(), use_end()); }
707
708 /// Return the unique node id.
709 int getNodeId() const { return NodeId; }
710
711 /// Set unique node id.
712 void setNodeId(int Id) { NodeId = Id; }
713
714 /// Return the node ordering.
715 unsigned getIROrder() const { return IROrder; }
716
717 /// Set the node ordering.
718 void setIROrder(unsigned Order) { IROrder = Order; }
719
720 /// Return the source location info.
721 const DebugLoc &getDebugLoc() const { return debugLoc; }
722
723 /// Set source location info. Try to avoid this, putting
724 /// it in the constructor is preferable.
725 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
726
727 /// This class provides iterator support for SDUse
728 /// operands that use a specific SDNode.
729 class use_iterator {
730 friend class SDNode;
731
732 SDUse *Op = nullptr;
733
734 explicit use_iterator(SDUse *op) : Op(op) {}
735
736 public:
737 using iterator_category = std::forward_iterator_tag;
738 using value_type = SDUse;
739 using difference_type = std::ptrdiff_t;
740 using pointer = value_type *;
741 using reference = value_type &;
742
743 use_iterator() = default;
744 use_iterator(const use_iterator &I) : Op(I.Op) {}
745
746 bool operator==(const use_iterator &x) const {
747 return Op == x.Op;
748 }
749 bool operator!=(const use_iterator &x) const {
750 return !operator==(x);
751 }
752
753 /// Return true if this iterator is at the end of uses list.
754 bool atEnd() const { return Op == nullptr; }
755
756 // Iterator traversal: forward iteration only.
757 use_iterator &operator++() { // Preincrement
758 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 758, __extension__
__PRETTY_FUNCTION__))
;
759 Op = Op->getNext();
760 return *this;
761 }
762
763 use_iterator operator++(int) { // Postincrement
764 use_iterator tmp = *this; ++*this; return tmp;
765 }
766
767 /// Retrieve a pointer to the current user node.
768 SDNode *operator*() const {
769 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 769, __extension__
__PRETTY_FUNCTION__))
;
770 return Op->getUser();
771 }
772
773 SDNode *operator->() const { return operator*(); }
774
775 SDUse &getUse() const { return *Op; }
776
777 /// Retrieve the operand # of this use in its user.
778 unsigned getOperandNo() const {
779 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 779, __extension__
__PRETTY_FUNCTION__))
;
780 return (unsigned)(Op - Op->getUser()->OperandList);
781 }
782 };
783
784 /// Provide iteration support to walk over all uses of an SDNode.
785 use_iterator use_begin() const {
786 return use_iterator(UseList);
787 }
788
789 static use_iterator use_end() { return use_iterator(nullptr); }
790
791 inline iterator_range<use_iterator> uses() {
792 return make_range(use_begin(), use_end());
793 }
794 inline iterator_range<use_iterator> uses() const {
795 return make_range(use_begin(), use_end());
796 }
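An illustrative walk over a node's users with the iteration support above; N is a hypothetical SDNode*:

  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;                 // the node using one of N's results
    unsigned OpNo = UI.getOperandNo();  // which operand slot of User refers to N
    (void)User; (void)OpNo;
  }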
797
798 /// Return true if there are exactly NUSES uses of the indicated value.
799 /// This method ignores uses of other values defined by this operation.
800 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
801
802 /// Return true if there are any use of the indicated value.
803 /// This method ignores uses of other values defined by this operation.
804 bool hasAnyUseOfValue(unsigned Value) const;
805
806 /// Return true if this node is the only use of N.
807 bool isOnlyUserOf(const SDNode *N) const;
808
809 /// Return true if this node is an operand of N.
810 bool isOperandOf(const SDNode *N) const;
811
812 /// Return true if this node is a predecessor of N.
813 /// NOTE: Implemented on top of hasPredecessor and every bit as
814 /// expensive. Use carefully.
815 bool isPredecessorOf(const SDNode *N) const {
816 return N->hasPredecessor(this);
817 }
818
819 /// Return true if N is a predecessor of this node.
820 /// N is either an operand of this node, or can be reached by recursively
821 /// traversing up the operands.
822 /// NOTE: This is an expensive method. Use it carefully.
823 bool hasPredecessor(const SDNode *N) const;
824
825 /// Returns true if N is a predecessor of any node in Worklist. This
826 /// helper keeps Visited and Worklist sets externally to allow unions and
827 /// searches to be performed in parallel, caching of results across
828 /// queries and incremental addition to Worklist. Stops early if N is
829 /// found but will resume. Remember to clear Visited and Worklists
830 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
831 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
832 /// topologically ordered (Operands have strictly smaller node id) and search
833 /// can be pruned leveraging this.
834 static bool hasPredecessorHelper(const SDNode *N,
835 SmallPtrSetImpl<const SDNode *> &Visited,
836 SmallVectorImpl<const SDNode *> &Worklist,
837 unsigned int MaxSteps = 0,
838 bool TopologicalPrune = false) {
839 SmallVector<const SDNode *, 8> DeferredNodes;
840 if (Visited.count(N))
841 return true;
842
843 // Node Id's are assigned in three places: As a topological
844 // ordering (> 0), during legalization (results in values set to
845 // 0), during legalization (results in values set to 0), and for new nodes
846 // (set to -1). If N has a topological id then we know that all nodes with
847 // ids smaller than it cannot be successors and we need not check them.
848 // Filter out all nodes that can't be matched. We add them to the worklist
849 // before exit in case of multiple calls. Note that during selection the
850 // topological id may be violated if a node's predecessor is selected before
851 // it. We mark this at selection by negating the id of unselected successors
852 // restricting topological pruning to positive ids.
853
854 int NId = N->getNodeId();
855 // If we Invalidated the Id, reconstruct original NId.
856 if (NId < -1)
857 NId = -(NId + 1);
858
859 bool Found = false;
860 while (!Worklist.empty()) {
861 const SDNode *M = Worklist.pop_back_val();
862 int MId = M->getNodeId();
863 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
864 (MId > 0) && (MId < NId)) {
865 DeferredNodes.push_back(M);
866 continue;
867 }
868 for (const SDValue &OpV : M->op_values()) {
869 SDNode *Op = OpV.getNode();
870 if (Visited.insert(Op).second)
871 Worklist.push_back(Op);
872 if (Op == N)
873 Found = true;
874 }
875 if (Found)
876 break;
877 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
878 break;
879 }
880 // Push deferred nodes back on worklist.
881 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
882 // If we bailed early, conservatively return found.
883 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
884 return true;
885 return Found;
886 }
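A hedged usage sketch of the helper above, following the calling convention its comment describes (caller-owned Visited/Worklist, seeded with the starting node); Root and N are hypothetical SDNode pointers:

  SmallPtrSet<const SDNode *, 16> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(Root);                 // start the walk from Root's operands
  bool NIsPredecessor =
      SDNode::hasPredecessorHelper(N, Visited, Worklist, /*MaxSteps=*/8192);
  // If the MaxSteps limit is hit, the helper conservatively reports true.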
887
888 /// Return true if all the users of N are contained in Nodes.
889 /// NOTE: Requires at least one match, but doesn't require them all.
890 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
891
892 /// Return the number of values used by this operation.
893 unsigned getNumOperands() const { return NumOperands; }
894
895 /// Return the maximum number of operands that a SDNode can hold.
896 static constexpr size_t getMaxNumOperands() {
897 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
898 }
899
900 /// Helper method returns the integer value of a ConstantSDNode operand.
901 inline uint64_t getConstantOperandVal(unsigned Num) const;
902
903 /// Helper method returns the APInt of a ConstantSDNode operand.
904 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
905
906 const SDValue &getOperand(unsigned Num) const {
907    assert(Num < NumOperands && "Invalid child # of SDNode!");
908 return OperandList[Num];
909 }
910
911 using op_iterator = SDUse *;
912
913 op_iterator op_begin() const { return OperandList; }
914 op_iterator op_end() const { return OperandList+NumOperands; }
915 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
916
917 /// Iterator for directly iterating over the operand SDValue's.
918 struct value_op_iterator
919 : iterator_adaptor_base<value_op_iterator, op_iterator,
920 std::random_access_iterator_tag, SDValue,
921 ptrdiff_t, value_op_iterator *,
922 value_op_iterator *> {
923 explicit value_op_iterator(SDUse *U = nullptr)
924 : iterator_adaptor_base(U) {}
925
926 const SDValue &operator*() const { return I->get(); }
927 };
928
929 iterator_range<value_op_iterator> op_values() const {
930 return make_range(value_op_iterator(op_begin()),
931 value_op_iterator(op_end()));
932 }
933
934 SDVTList getVTList() const {
935 SDVTList X = { ValueList, NumValues };
936 return X;
937 }
938
939 /// If this node has a glue operand, return the node
940 /// to which the glue operand points. Otherwise return NULL.
941 SDNode *getGluedNode() const {
942 if (getNumOperands() != 0 &&
943 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
944 return getOperand(getNumOperands()-1).getNode();
945 return nullptr;
946 }
947
948 /// If this node has a glue value with a user, return
949 /// the user (there is at most one). Otherwise return NULL.
950 SDNode *getGluedUser() const {
951 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
952 if (UI.getUse().get().getValueType() == MVT::Glue)
953 return *UI;
954 return nullptr;
955 }
956
957 SDNodeFlags getFlags() const { return Flags; }
958 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
959
960 /// Clear any flags in this node that aren't also set in Flags.
961 /// If Flags is not in a defined state then this has no effect.
962 void intersectFlagsWith(const SDNodeFlags Flags);
963
964 /// Return the number of values defined/returned by this operator.
965 unsigned getNumValues() const { return NumValues; }
966
967 /// Return the type of a specified result.
968 EVT getValueType(unsigned ResNo) const {
969    assert(ResNo < NumValues && "Illegal result number!");
970 return ValueList[ResNo];
971 }
972
973 /// Return the type of a specified result as a simple type.
974 MVT getSimpleValueType(unsigned ResNo) const {
975 return getValueType(ResNo).getSimpleVT();
976 }
977
978 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
979 ///
980 /// If the value type is a scalable vector type, the scalable property will
981 /// be set and the runtime size will be a positive integer multiple of the
982 /// base size.
983 TypeSize getValueSizeInBits(unsigned ResNo) const {
984 return getValueType(ResNo).getSizeInBits();
985 }
986
987 using value_iterator = const EVT *;
988
989 value_iterator value_begin() const { return ValueList; }
990 value_iterator value_end() const { return ValueList+NumValues; }
991 iterator_range<value_iterator> values() const {
992 return llvm::make_range(value_begin(), value_end());
993 }
994
995 /// Return the opcode of this operation for printing.
996 std::string getOperationName(const SelectionDAG *G = nullptr) const;
997 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
998 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
999 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
1000 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1001 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1002
1003 /// Print a SelectionDAG node and all children down to
1004 /// the leaves. The given SelectionDAG allows target-specific nodes
1005 /// to be printed in human-readable form. Unlike printr, this will
1006 /// print the whole DAG, including children that appear multiple
1007 /// times.
1008 ///
1009 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1010
1011 /// Print a SelectionDAG node and children up to
1012 /// depth "depth." The given SelectionDAG allows target-specific
1013 /// nodes to be printed in human-readable form. Unlike printr, this
1014 /// will print children that appear multiple times wherever they are
1015 /// used.
1016 ///
1017 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1018 unsigned depth = 100) const;
1019
1020 /// Dump this node, for debugging.
1021 void dump() const;
1022
1023 /// Dump (recursively) this node and its use-def subgraph.
1024 void dumpr() const;
1025
1026 /// Dump this node, for debugging.
1027 /// The given SelectionDAG allows target-specific nodes to be printed
1028 /// in human-readable form.
1029 void dump(const SelectionDAG *G) const;
1030
1031 /// Dump (recursively) this node and its use-def subgraph.
1032 /// The given SelectionDAG allows target-specific nodes to be printed
1033 /// in human-readable form.
1034 void dumpr(const SelectionDAG *G) const;
1035
1036 /// printrFull to dbgs(). The given SelectionDAG allows
1037 /// target-specific nodes to be printed in human-readable form.
1038 /// Unlike dumpr, this will print the whole DAG, including children
1039 /// that appear multiple times.
1040 void dumprFull(const SelectionDAG *G = nullptr) const;
1041
1042 /// printrWithDepth to dbgs(). The given
1043 /// SelectionDAG allows target-specific nodes to be printed in
1044 /// human-readable form. Unlike dumpr, this will print children
1045 /// that appear multiple times wherever they are used.
1046 ///
1047 void dumprWithDepth(const SelectionDAG *G = nullptr,
1048 unsigned depth = 100) const;
1049
1050 /// Gather unique data for the node.
1051 void Profile(FoldingSetNodeID &ID) const;
1052
1053 /// This method should only be used by the SDUse class.
1054 void addUse(SDUse &U) { U.addToList(&UseList); }
1055
1056protected:
1057 static SDVTList getSDVTList(EVT VT) {
1058 SDVTList Ret = { getValueTypeList(VT), 1 };
1059 return Ret;
1060 }
1061
1062 /// Create an SDNode.
1063 ///
1064 /// SDNodes are created without any operands, and never own the operand
1065 /// storage. To add operands, see SelectionDAG::createOperands.
1066 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1067 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1068 IROrder(Order), debugLoc(std::move(dl)) {
1069 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1070    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1071    assert(NumValues == VTs.NumVTs &&
1072           "NumValues wasn't wide enough for its operands!");
1073 }
1074
1075 /// Release the operands and set this node to have zero operands.
1076 void DropOperands();
1077};
1078
1079/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1080/// into SDNode creation functions.
1081/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1082/// from the original Instruction, and IROrder is the ordinal position of
1083/// the instruction.
1084/// When an SDNode is created after the DAG is being built, both DebugLoc and
1085/// the IROrder are propagated from the original SDNode.
1086/// So the SDLoc class provides two constructors besides the default one: one to
1087/// be used by the DAGBuilder, the other to be used by others.
1088class SDLoc {
1089private:
1090 DebugLoc DL;
1091 int IROrder = 0;
1092
1093public:
1094 SDLoc() = default;
1095 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1096 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1097 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1098    assert(Order >= 0 && "bad IROrder");
1099 if (I)
1100 DL = I->getDebugLoc();
1101 }
1102
1103 unsigned getIROrder() const { return IROrder; }
1104 const DebugLoc &getDebugLoc() const { return DL; }
1105};
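As a rough illustration of the two construction paths the comment above describes, here is a minimal sketch (not part of the header; the function names are made up, and it assumes the surrounding LLVM headers):

// Created from an existing node: DebugLoc and IROrder are copied from N.
static SDLoc locFromNode(const SDNode *N) { return SDLoc(N); }

// Created by the DAG builder: Order is the instruction's ordinal position,
// and the DebugLoc is taken from the IR instruction itself.
static SDLoc locFromIR(const Instruction *I, int Order) {
  return SDLoc(I, Order);
}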
1106
1107// Define inline functions from the SDValue class.
1108
1109inline SDValue::SDValue(SDNode *node, unsigned resno)
1110 : Node(node), ResNo(resno) {
1111 // Explicitly check for !ResNo to avoid use-after-free, because there are
1112 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1113 // combines.
1114  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
1115         "Invalid result number for the given node!");
1116  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
1117}
1118
1119inline unsigned SDValue::getOpcode() const {
1120 return Node->getOpcode();
1121}
1122
1123inline EVT SDValue::getValueType() const {
1124 return Node->getValueType(ResNo);
1125}
1126
1127inline unsigned SDValue::getNumOperands() const {
1128 return Node->getNumOperands();
1129}
1130
1131inline const SDValue &SDValue::getOperand(unsigned i) const {
1132 return Node->getOperand(i);
1133}
1134
1135inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1136 return Node->getConstantOperandVal(i);
1137}
1138
1139inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1140 return Node->getConstantOperandAPInt(i);
1141}
1142
1143inline bool SDValue::isTargetOpcode() const {
1144 return Node->isTargetOpcode();
1145}
1146
1147inline bool SDValue::isTargetMemoryOpcode() const {
1148 return Node->isTargetMemoryOpcode();
1149}
1150
1151inline bool SDValue::isMachineOpcode() const {
1152 return Node->isMachineOpcode();
7: Called C++ object pointer is null
1153}
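The analyzer step above fires because SDValue::Node may legitimately be null (an empty SDValue is how "no result" is signalled), so dereferencing it here requires the caller to have checked first. A minimal caller-side sketch of that guard; the helper name is hypothetical and not part of this header:

// Only classify the node once we know the SDValue actually wraps one.
static bool isMachineOpcodeOrNull(SDValue V) {
  if (!V.getNode())           // empty SDValue: nothing to classify
    return false;
  return V.isMachineOpcode(); // safe: Node is known non-null here
}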
1154
1155inline unsigned SDValue::getMachineOpcode() const {
1156 return Node->getMachineOpcode();
1157}
1158
1159inline bool SDValue::isUndef() const {
1160 return Node->isUndef();
1161}
1162
1163inline bool SDValue::use_empty() const {
1164 return !Node->hasAnyUseOfValue(ResNo);
1165}
1166
1167inline bool SDValue::hasOneUse() const {
1168 return Node->hasNUsesOfValue(1, ResNo);
1169}
1170
1171inline const DebugLoc &SDValue::getDebugLoc() const {
1172 return Node->getDebugLoc();
1173}
1174
1175inline void SDValue::dump() const {
1176 return Node->dump();
1177}
1178
1179inline void SDValue::dump(const SelectionDAG *G) const {
1180 return Node->dump(G);
1181}
1182
1183inline void SDValue::dumpr() const {
1184 return Node->dumpr();
1185}
1186
1187inline void SDValue::dumpr(const SelectionDAG *G) const {
1188 return Node->dumpr(G);
1189}
1190
1191// Define inline functions from the SDUse class.
1192
1193inline void SDUse::set(const SDValue &V) {
1194 if (Val.getNode()) removeFromList();
1195 Val = V;
1196 if (V.getNode()) V.getNode()->addUse(*this);
1197}
1198
1199inline void SDUse::setInitial(const SDValue &V) {
1200 Val = V;
1201 V.getNode()->addUse(*this);
1202}
1203
1204inline void SDUse::setNode(SDNode *N) {
1205 if (Val.getNode()) removeFromList();
1206 Val.setNode(N);
1207 if (N) N->addUse(*this);
1208}
1209
1210/// This class is used to form a handle around another node that
1211/// is persistent and is updated across invocations of replaceAllUsesWith on its
1212/// operand. This node should be directly created by end-users and not added to
1213/// the AllNodes list.
1214class HandleSDNode : public SDNode {
1215 SDUse Op;
1216
1217public:
1218 explicit HandleSDNode(SDValue X)
1219 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1220 // HandleSDNodes are never inserted into the DAG, so they won't be
1221 // auto-numbered. Use ID 65535 as a sentinel.
1222 PersistentId = 0xffff;
1223
1224 // Manually set up the operand list. This node type is special in that it's
1225 // always stack allocated and SelectionDAG does not manage its operands.
1226 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1227 // be so special.
1228 Op.setUser(this);
1229 Op.setInitial(X);
1230 NumOperands = 1;
1231 OperandList = &Op;
1232 }
1233 ~HandleSDNode();
1234
1235 const SDValue &getValue() const { return Op; }
1236};
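HandleSDNode is typically used as a stack-allocated anchor so a value stays reachable while the DAG is rewritten underneath it. A minimal sketch of that idiom (illustrative only; the helper name is invented):

// Keep X alive across code that may replace or delete its node, then read
// back whatever it was replaced with.
static SDValue trackAcrossReplacement(SDValue X) {
  HandleSDNode Handle(X);   // stack allocated, never added to AllNodes
  // ... DAG mutations that may RAUW or delete X's node go here ...
  return Handle.getValue(); // reflects any replacement that happened
}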
1237
1238class AddrSpaceCastSDNode : public SDNode {
1239private:
1240 unsigned SrcAddrSpace;
1241 unsigned DestAddrSpace;
1242
1243public:
1244 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1245 unsigned SrcAS, unsigned DestAS);
1246
1247 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1248 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1249
1250 static bool classof(const SDNode *N) {
1251 return N->getOpcode() == ISD::ADDRSPACECAST;
1252 }
1253};
1254
1255/// This is an abstract virtual class for memory operations.
1256class MemSDNode : public SDNode {
1257private:
1258 // VT of in-memory value.
1259 EVT MemoryVT;
1260
1261protected:
1262 /// Memory reference information.
1263 MachineMemOperand *MMO;
1264
1265public:
1266 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1267 EVT memvt, MachineMemOperand *MMO);
1268
1269 bool readMem() const { return MMO->isLoad(); }
1270 bool writeMem() const { return MMO->isStore(); }
1271
1272 /// Returns alignment and volatility of the memory access
1273 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1274 Align getAlign() const { return MMO->getAlign(); }
1275 // FIXME: Remove once transition to getAlign is over.
1276 unsigned getAlignment() const { return MMO->getAlign().value(); }
1277
1278 /// Return the SubclassData value, without HasDebugValue. This contains an
1279 /// encoding of the volatile flag, as well as bits used by subclasses. This
1280 /// function should only be used to compute a FoldingSetNodeID value.
1281 /// The HasDebugValue bit is masked out because the CSE map needs to match
1282 /// nodes with debug info against nodes without debug info. The same applies
1283 /// to the isDivergent bit.
1284 unsigned getRawSubclassData() const {
1285 uint16_t Data;
1286 union {
1287 char RawSDNodeBits[sizeof(uint16_t)];
1288 SDNodeBitfields SDNodeBits;
1289 };
1290 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1291 SDNodeBits.HasDebugValue = 0;
1292 SDNodeBits.IsDivergent = false;
1293 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1294 return Data;
1295 }
1296
1297 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1298 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1299 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1300 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1301
1302 // Returns the offset from the location of the access.
1303 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1304
1305 /// Returns the AA info that describes the dereference.
1306 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1307
1308 /// Returns the Ranges that describes the dereference.
1309 const MDNode *getRanges() const { return MMO->getRanges(); }
1310
1311 /// Returns the synchronization scope ID for this memory operation.
1312 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1313
1314 /// Return the atomic ordering requirements for this memory operation. For
1315 /// cmpxchg atomic operations, return the atomic ordering requirements when
1316 /// the store occurs.
1317 AtomicOrdering getSuccessOrdering() const {
1318 return MMO->getSuccessOrdering();
1319 }
1320
1321 /// Return a single atomic ordering that is at least as strong as both the
1322 /// success and failure orderings for an atomic operation. (For operations
1323 /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
1324 AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
1325
1326 /// Return true if the memory operation ordering is Unordered or higher.
1327 bool isAtomic() const { return MMO->isAtomic(); }
1328
1329 /// Returns true if the memory operation doesn't imply any ordering
1330 /// constraints on surrounding memory operations beyond the normal memory
1331 /// aliasing rules.
1332 bool isUnordered() const { return MMO->isUnordered(); }
1333
1334 /// Returns true if the memory operation is neither atomic nor volatile.
1335 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1336
1337 /// Return the type of the in-memory value.
1338 EVT getMemoryVT() const { return MemoryVT; }
1339
1340 /// Return a MachineMemOperand object describing the memory
1341 /// reference performed by this operation.
1342 MachineMemOperand *getMemOperand() const { return MMO; }
1343
1344 const MachinePointerInfo &getPointerInfo() const {
1345 return MMO->getPointerInfo();
1346 }
1347
1348 /// Return the address space for the associated pointer
1349 unsigned getAddressSpace() const {
1350 return getPointerInfo().getAddrSpace();
1351 }
1352
1353 /// Update this MemSDNode's MachineMemOperand information
1354 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1355 /// This must only be used when the new alignment applies to all users of
1356 /// this MachineMemOperand.
1357 void refineAlignment(const MachineMemOperand *NewMMO) {
1358 MMO->refineAlignment(NewMMO);
1359 }
1360
1361 const SDValue &getChain() const { return getOperand(0); }
1362
1363 const SDValue &getBasePtr() const {
1364 switch (getOpcode()) {
1365 case ISD::STORE:
1366 case ISD::VP_STORE:
1367 case ISD::MSTORE:
1368 case ISD::VP_SCATTER:
1369 return getOperand(2);
1370 case ISD::MGATHER:
1371 case ISD::MSCATTER:
1372 return getOperand(3);
1373 default:
1374 return getOperand(1);
1375 }
1376 }
1377
1378 // Methods to support isa and dyn_cast
1379 static bool classof(const SDNode *N) {
1380 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1381 // with either an intrinsic or a target opcode.
1382 switch (N->getOpcode()) {
1383 case ISD::LOAD:
1384 case ISD::STORE:
1385 case ISD::PREFETCH:
1386 case ISD::ATOMIC_CMP_SWAP:
1387 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
1388 case ISD::ATOMIC_SWAP:
1389 case ISD::ATOMIC_LOAD_ADD:
1390 case ISD::ATOMIC_LOAD_SUB:
1391 case ISD::ATOMIC_LOAD_AND:
1392 case ISD::ATOMIC_LOAD_CLR:
1393 case ISD::ATOMIC_LOAD_OR:
1394 case ISD::ATOMIC_LOAD_XOR:
1395 case ISD::ATOMIC_LOAD_NAND:
1396 case ISD::ATOMIC_LOAD_MIN:
1397 case ISD::ATOMIC_LOAD_MAX:
1398 case ISD::ATOMIC_LOAD_UMIN:
1399 case ISD::ATOMIC_LOAD_UMAX:
1400 case ISD::ATOMIC_LOAD_FADD:
1401 case ISD::ATOMIC_LOAD_FSUB:
1402 case ISD::ATOMIC_LOAD:
1403 case ISD::ATOMIC_STORE:
1404 case ISD::MLOAD:
1405 case ISD::MSTORE:
1406 case ISD::MGATHER:
1407 case ISD::MSCATTER:
1408 case ISD::VP_LOAD:
1409 case ISD::VP_STORE:
1410 case ISD::VP_GATHER:
1411 case ISD::VP_SCATTER:
1412 return true;
1413 default:
1414 return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
1415 }
1416 }
1417};
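A short usage sketch of the accessors above, assuming the usual llvm::dyn_cast machinery; the helper is illustrative, not an existing API:

// Report whether N is a simple (non-atomic, non-volatile) memory access and,
// if so, hand back the type of the value as it sits in memory.
static bool isSimpleMemAccess(const SDNode *N, EVT &MemVT) {
  const auto *M = dyn_cast<MemSDNode>(N);
  if (!M || !M->isSimple())
    return false;
  MemVT = M->getMemoryVT();
  return true;
}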
1418
1419/// This is an SDNode representing atomic operations.
1420class AtomicSDNode : public MemSDNode {
1421public:
1422 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1423 EVT MemVT, MachineMemOperand *MMO)
1424 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1425    assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
1426            MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
1427 }
1428
1429 const SDValue &getBasePtr() const { return getOperand(1); }
1430 const SDValue &getVal() const { return getOperand(2); }
1431
1432 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1433 /// otherwise.
1434 bool isCompareAndSwap() const {
1435 unsigned Op = getOpcode();
1436 return Op == ISD::ATOMIC_CMP_SWAP ||
1437 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1438 }
1439
1440 /// For cmpxchg atomic operations, return the atomic ordering requirements
1441 /// when store does not occur.
1442 AtomicOrdering getFailureOrdering() const {
1443    assert(isCompareAndSwap() && "Must be cmpxchg operation");
1444 return MMO->getFailureOrdering();
1445 }
1446
1447 // Methods to support isa and dyn_cast
1448 static bool classof(const SDNode *N) {
1449 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1450 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1451 N->getOpcode() == ISD::ATOMIC_SWAP ||
1452 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1453 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1454 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1455 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1456 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1457 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD ||
1466 N->getOpcode() == ISD::ATOMIC_STORE;
1467 }
1468};
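For example, the failure ordering is only meaningful for cmpxchg nodes, so callers gate on isCompareAndSwap() first (a sketch; the helper name is not part of LLVM):

// For cmpxchg, use the dedicated failure ordering; every other atomic node
// only carries the success ordering inherited from MemSDNode.
static AtomicOrdering getOrderingOnFailure(const AtomicSDNode *A) {
  if (A->isCompareAndSwap())
    return A->getFailureOrdering();
  return A->getSuccessOrdering();
}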
1469
1470/// This SDNode is used for target intrinsics that touch
1471/// memory and need an associated MachineMemOperand. Its opcode may be
1472/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1473/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1474class MemIntrinsicSDNode : public MemSDNode {
1475public:
1476 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1477 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1478 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1479 SDNodeBits.IsMemIntrinsic = true;
1480 }
1481
1482 // Methods to support isa and dyn_cast
1483 static bool classof(const SDNode *N) {
1484    // We lower some target intrinsics to their target opcode early, so a
1485    // node with a target opcode can be of this class.
1486 return N->isMemIntrinsic() ||
1487 N->getOpcode() == ISD::PREFETCH ||
1488 N->isTargetMemoryOpcode();
1489 }
1490};
1491
1492/// This SDNode is used to implement the code generator
1493/// support for the llvm IR shufflevector instruction. It combines elements
1494/// from two input vectors into a new input vector, with the selection and
1495/// ordering of elements determined by an array of integers, referred to as
1496/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1497/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1498/// An index of -1 is treated as undef, such that the code generator may put
1499/// any value in the corresponding element of the result.
1500class ShuffleVectorSDNode : public SDNode {
1501 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1502 // is freed when the SelectionDAG object is destroyed.
1503 const int *Mask;
1504
1505protected:
1506 friend class SelectionDAG;
1507
1508 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1509 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1510
1511public:
1512 ArrayRef<int> getMask() const {
1513 EVT VT = getValueType(0);
1514 return makeArrayRef(Mask, VT.getVectorNumElements());
1515 }
1516
1517 int getMaskElt(unsigned Idx) const {
1518    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
1519 return Mask[Idx];
1520 }
1521
1522 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1523
1524 int getSplatIndex() const {
1525    assert(isSplat() && "Cannot get splat index for non-splat!");
1526 EVT VT = getValueType(0);
1527 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1528 if (Mask[i] >= 0)
1529 return Mask[i];
1530
1531 // We can choose any index value here and be correct because all elements
1532 // are undefined. Return 0 for better potential for callers to simplify.
1533 return 0;
1534 }
1535
1536 static bool isSplatMask(const int *Mask, EVT VT);
1537
1538 /// Change values in a shuffle permute mask assuming
1539 /// the two vector operands have swapped position.
1540 static void commuteMask(MutableArrayRef<int> Mask) {
1541 unsigned NumElems = Mask.size();
1542 for (unsigned i = 0; i != NumElems; ++i) {
1543 int idx = Mask[i];
1544 if (idx < 0)
1545 continue;
1546 else if (idx < (int)NumElems)
1547 Mask[i] = idx + NumElems;
1548 else
1549 Mask[i] = idx - NumElems;
1550 }
1551 }
1552
1553 static bool classof(const SDNode *N) {
1554 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1555 }
1556};
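A worked example of commuteMask on a four-element shuffle, assuming llvm/ADT/SmallVector.h is available; the values are purely illustrative:

// {0, 5, -1, 2} selects LHS[0], RHS[1], undef, LHS[2]. After the two vector
// operands swap places, the same selection is written as {4, 1, -1, 6}.
static void commuteMaskExample() {
  SmallVector<int, 4> Mask = {0, 5, -1, 2};
  ShuffleVectorSDNode::commuteMask(Mask); // Mask is now {4, 1, -1, 6}
}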
1557
1558class ConstantSDNode : public SDNode {
1559 friend class SelectionDAG;
1560
1561 const ConstantInt *Value;
1562
1563 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1564 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1565 getSDVTList(VT)),
1566 Value(val) {
1567 ConstantSDNodeBits.IsOpaque = isOpaque;
1568 }
1569
1570public:
1571 const ConstantInt *getConstantIntValue() const { return Value; }
1572 const APInt &getAPIntValue() const { return Value->getValue(); }
1573 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1574 int64_t getSExtValue() const { return Value->getSExtValue(); }
1575  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
1576 return Value->getLimitedValue(Limit);
1577 }
1578 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1579 Align getAlignValue() const { return Value->getAlignValue(); }
1580
1581 bool isOne() const { return Value->isOne(); }
1582 bool isZero() const { return Value->isZero(); }
1583 // NOTE: This is soft-deprecated. Please use `isZero()` instead.
1584 bool isNullValue() const { return isZero(); }
1585 bool isAllOnes() const { return Value->isMinusOne(); }
1586 // NOTE: This is soft-deprecated. Please use `isAllOnes()` instead.
1587 bool isAllOnesValue() const { return isAllOnes(); }
1588 bool isMaxSignedValue() const { return Value->isMaxValue(true); }
1589 bool isMinSignedValue() const { return Value->isMinValue(true); }
1590
1591 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1592
1593 static bool classof(const SDNode *N) {
1594 return N->getOpcode() == ISD::Constant ||
1595 N->getOpcode() == ISD::TargetConstant;
1596 }
1597};
1598
1599uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1600 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1601}
1602
1603const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1604 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1605}
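A small sketch of how these constant accessors are usually combined with dyn_cast; getConstantOperandVal asserts that the operand is a constant, so the checked form below is the safe variant (the helper name is illustrative):

// True if operand Idx of N is the constant integer 1 (of any width).
static bool operandIsOne(const SDNode *N, unsigned Idx) {
  if (const auto *C = dyn_cast<ConstantSDNode>(N->getOperand(Idx)))
    return C->isOne();
  return false;
}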
1606
1607class ConstantFPSDNode : public SDNode {
1608 friend class SelectionDAG;
1609
1610 const ConstantFP *Value;
1611
1612 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1613 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1614 DebugLoc(), getSDVTList(VT)),
1615 Value(val) {}
1616
1617public:
1618 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1619 const ConstantFP *getConstantFPValue() const { return Value; }
1620
1621 /// Return true if the value is positive or negative zero.
1622 bool isZero() const { return Value->isZero(); }
1623
1624 /// Return true if the value is a NaN.
1625 bool isNaN() const { return Value->isNaN(); }
1626
1627 /// Return true if the value is an infinity
1628 bool isInfinity() const { return Value->isInfinity(); }
1629
1630 /// Return true if the value is negative.
1631 bool isNegative() const { return Value->isNegative(); }
1632
1633 /// We don't rely on operator== working on double values, as
1634 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1635 /// As such, this method can be used to do an exact bit-for-bit comparison of
1636 /// two floating point values.
1637
1638 /// We leave the version with the double argument here because it's just so
1639 /// convenient to write "2.0" and the like. Without this function we'd
1640 /// have to duplicate its logic everywhere it's called.
1641 bool isExactlyValue(double V) const {
1642 return Value->getValueAPF().isExactlyValue(V);
1643 }
1644 bool isExactlyValue(const APFloat& V) const;
1645
1646 static bool isValueValidForType(EVT VT, const APFloat& Val);
1647
1648 static bool classof(const SDNode *N) {
1649 return N->getOpcode() == ISD::ConstantFP ||
1650 N->getOpcode() == ISD::TargetConstantFP;
1651 }
1652};
1653
1654/// Returns true if \p V is a constant integer zero.
1655bool isNullConstant(SDValue V);
1656
1657/// Returns true if \p V is an FP constant with a value of positive zero.
1658bool isNullFPConstant(SDValue V);
1659
1660/// Returns true if \p V is an integer constant with all bits set.
1661bool isAllOnesConstant(SDValue V);
1662
1663/// Returns true if \p V is a constant integer one.
1664bool isOneConstant(SDValue V);
1665
1666/// Return the non-bitcasted source operand of \p V if it exists.
1667/// If \p V is not a bitcasted value, it is returned as-is.
1668SDValue peekThroughBitcasts(SDValue V);
1669
1670/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1671/// If \p V is not a bitcasted one-use value, it is returned as-is.
1672SDValue peekThroughOneUseBitcasts(SDValue V);
1673
1674/// Return the non-extracted vector source operand of \p V if it exists.
1675/// If \p V is not an extracted subvector, it is returned as-is.
1676SDValue peekThroughExtractSubvectors(SDValue V);
1677
1678/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1679/// constant is canonicalized to be operand 1.
1680bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1681
1682/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1683ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1684 bool AllowTruncation = false);
1685
1686/// Returns the SDNode if it is a demanded constant splat BuildVector or
1687/// constant int.
1688ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1689 bool AllowUndefs = false,
1690 bool AllowTruncation = false);
1691
1692/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1693ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1694
1695/// Returns the SDNode if it is a demanded constant splat BuildVector or
1696/// constant float.
1697ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1698 bool AllowUndefs = false);
1699
1700/// Return true if the value is a constant 0 integer or a splatted vector of
1701/// a constant 0 integer (with no undefs by default).
1702/// Build vector implicit truncation is not an issue for null values.
1703bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1704
1705/// Return true if the value is a constant 1 integer or a splatted vector of a
1706/// constant 1 integer (with no undefs).
1707/// Does not permit build vector implicit truncation.
1708bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);
1709
1710/// Return true if the value is a constant -1 integer or a splatted vector of a
1711/// constant -1 integer (with no undefs).
1712/// Does not permit build vector implicit truncation.
1713bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1714
1715/// Return true if \p V is either an integer or an FP constant.
1716inline bool isIntOrFPConstant(SDValue V) {
1717 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1718}
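For instance, a common pattern with the splat helpers declared above, shown as a sketch (the wrapper name is made up):

// Match "splat of 1" whether V is a scalar ISD::Constant or a BUILD_VECTOR
// splat, tolerating undef lanes in the vector case.
static bool isSplatOfOne(SDValue V) {
  if (ConstantSDNode *C = isConstOrConstSplat(V, /*AllowUndefs=*/true))
    return C->isOne();
  return false;
}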
1719
1720class GlobalAddressSDNode : public SDNode {
1721 friend class SelectionDAG;
1722
1723 const GlobalValue *TheGlobal;
1724 int64_t Offset;
1725 unsigned TargetFlags;
1726
1727 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1728 const GlobalValue *GA, EVT VT, int64_t o,
1729 unsigned TF);
1730
1731public:
1732 const GlobalValue *getGlobal() const { return TheGlobal; }
1733 int64_t getOffset() const { return Offset; }
1734 unsigned getTargetFlags() const { return TargetFlags; }
1735 // Return the address space this GlobalAddress belongs to.
1736 unsigned getAddressSpace() const;
1737
1738 static bool classof(const SDNode *N) {
1739 return N->getOpcode() == ISD::GlobalAddress ||
1740 N->getOpcode() == ISD::TargetGlobalAddress ||
1741 N->getOpcode() == ISD::GlobalTLSAddress ||
1742 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1743 }
1744};
1745
1746class FrameIndexSDNode : public SDNode {
1747 friend class SelectionDAG;
1748
1749 int FI;
1750
1751 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1752 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1753 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1754 }
1755
1756public:
1757 int getIndex() const { return FI; }
1758
1759 static bool classof(const SDNode *N) {
1760 return N->getOpcode() == ISD::FrameIndex ||
1761 N->getOpcode() == ISD::TargetFrameIndex;
1762 }
1763};
1764
1765/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1766/// the offset and size that are started/ended in the underlying FrameIndex.
1767class LifetimeSDNode : public SDNode {
1768 friend class SelectionDAG;
1769 int64_t Size;
1770 int64_t Offset; // -1 if offset is unknown.
1771
1772 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1773 SDVTList VTs, int64_t Size, int64_t Offset)
1774 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1775public:
1776 int64_t getFrameIndex() const {
1777 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1778 }
1779
1780 bool hasOffset() const { return Offset >= 0; }
1781 int64_t getOffset() const {
1782    assert(hasOffset() && "offset is unknown");
1783 return Offset;
1784 }
1785 int64_t getSize() const {
1786    assert(hasOffset() && "offset is unknown");
1787 return Size;
1788 }
1789
1790 // Methods to support isa and dyn_cast
1791 static bool classof(const SDNode *N) {
1792 return N->getOpcode() == ISD::LIFETIME_START ||
1793 N->getOpcode() == ISD::LIFETIME_END;
1794 }
1795};
1796
1797/// This SDNode is used for PSEUDO_PROBE values, which hold the function GUID and
1798/// the index of the basic block being probed. A pseudo probe serves as a
1799/// placeholder and will be removed at the end of compilation. It has no
1800/// operands because we do not want instruction selection to have to deal with them.
1801class PseudoProbeSDNode : public SDNode {
1802 friend class SelectionDAG;
1803 uint64_t Guid;
1804 uint64_t Index;
1805 uint32_t Attributes;
1806
1807 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1808 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1809 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1810 Attributes(Attr) {}
1811
1812public:
1813 uint64_t getGuid() const { return Guid; }
1814 uint64_t getIndex() const { return Index; }
1815 uint32_t getAttributes() const { return Attributes; }
1816
1817 // Methods to support isa and dyn_cast
1818 static bool classof(const SDNode *N) {
1819 return N->getOpcode() == ISD::PSEUDO_PROBE;
1820 }
1821};
1822
1823class JumpTableSDNode : public SDNode {
1824 friend class SelectionDAG;
1825
1826 int JTI;
1827 unsigned TargetFlags;
1828
1829 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1830 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1831 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1832 }
1833
1834public:
1835 int getIndex() const { return JTI; }
1836 unsigned getTargetFlags() const { return TargetFlags; }
1837
1838 static bool classof(const SDNode *N) {
1839 return N->getOpcode() == ISD::JumpTable ||
1840 N->getOpcode() == ISD::TargetJumpTable;
1841 }
1842};
1843
1844class ConstantPoolSDNode : public SDNode {
1845 friend class SelectionDAG;
1846
1847 union {
1848 const Constant *ConstVal;
1849 MachineConstantPoolValue *MachineCPVal;
1850 } Val;
1851 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1852 Align Alignment; // Minimum alignment requirement of CP.
1853 unsigned TargetFlags;
1854
1855 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1856 Align Alignment, unsigned TF)
1857 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1858 DebugLoc(), getSDVTList(VT)),
1859 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1860    assert(Offset >= 0 && "Offset is too large");
1861 Val.ConstVal = c;
1862 }
1863
1864 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1865 Align Alignment, unsigned TF)
1866 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1867 DebugLoc(), getSDVTList(VT)),
1868 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1869    assert(Offset >= 0 && "Offset is too large");
1870 Val.MachineCPVal = v;
1871    Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1872 }
1873
1874public:
1875 bool isMachineConstantPoolEntry() const {
1876 return Offset < 0;
1877 }
1878
1879 const Constant *getConstVal() const {
1880    assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1881 return Val.ConstVal;
1882 }
1883
1884 MachineConstantPoolValue *getMachineCPVal() const {
1885    assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1886 return Val.MachineCPVal;
1887 }
1888
1889 int getOffset() const {
1890    return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1891 }
1892
1893 // Return the alignment of this constant pool object, which is either 0 (for
1894 // default alignment) or the desired value.
1895 Align getAlign() const { return Alignment; }
1896 unsigned getTargetFlags() const { return TargetFlags; }
1897
1898 Type *getType() const;
1899
1900 static bool classof(const SDNode *N) {
1901 return N->getOpcode() == ISD::ConstantPool ||
1902 N->getOpcode() == ISD::TargetConstantPool;
1903 }
1904};
1905
1906/// Completely target-dependent object reference.
1907class TargetIndexSDNode : public SDNode {
1908 friend class SelectionDAG;
1909
1910 unsigned TargetFlags;
1911 int Index;
1912 int64_t Offset;
1913
1914public:
1915 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1916 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1917 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1918
1919 unsigned getTargetFlags() const { return TargetFlags; }
1920 int getIndex() const { return Index; }
1921 int64_t getOffset() const { return Offset; }
1922
1923 static bool classof(const SDNode *N) {
1924 return N->getOpcode() == ISD::TargetIndex;
1925 }
1926};
1927
1928class BasicBlockSDNode : public SDNode {
1929 friend class SelectionDAG;
1930
1931 MachineBasicBlock *MBB;
1932
1933 /// Debug info is meaningful and potentially useful here, but we create
1934 /// blocks out of order when they're jumped to, which makes it a bit
1935 /// harder. Let's see if we need it first.
1936 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1937 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1938 {}
1939
1940public:
1941 MachineBasicBlock *getBasicBlock() const { return MBB; }
1942
1943 static bool classof(const SDNode *N) {
1944 return N->getOpcode() == ISD::BasicBlock;
1945 }
1946};
1947
1948/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1949class BuildVectorSDNode : public SDNode {
1950public:
1951 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1952 explicit BuildVectorSDNode() = delete;
1953
1954 /// Check if this is a constant splat, and if so, find the
1955 /// smallest element size that splats the vector. If MinSplatBits is
1956 /// nonzero, the element size must be at least that large. Note that the
1957 /// splat element may be the entire vector (i.e., a one element vector).
1958 /// Returns the splat element value in SplatValue. Any undefined bits in
1959 /// that value are zero, and the corresponding bits in the SplatUndef mask
1960 /// are set. The SplatBitSize value is set to the splat element size in
1961 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1962 /// undefined. isBigEndian describes the endianness of the target.
1963 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1964 unsigned &SplatBitSize, bool &HasAnyUndefs,
1965 unsigned MinSplatBits = 0,
1966 bool isBigEndian = false) const;
1967
1968 /// Returns the demanded splatted value or a null value if this is not a
1969 /// splat.
1970 ///
1971 /// The DemandedElts mask indicates the elements that must be in the splat.
1972 /// If passed a non-null UndefElements bitvector, it will resize it to match
1973 /// the vector width and set the bits where elements are undef.
1974 SDValue getSplatValue(const APInt &DemandedElts,
1975 BitVector *UndefElements = nullptr) const;
1976
1977 /// Returns the splatted value or a null value if this is not a splat.
1978 ///
1979 /// If passed a non-null UndefElements bitvector, it will resize it to match
1980 /// the vector width and set the bits where elements are undef.
1981 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1982
1983 /// Find the shortest repeating sequence of values in the build vector.
1984 ///
1985 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1986 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1987 ///
1988 /// Currently this must be a power-of-2 build vector.
1989 /// The DemandedElts mask indicates the elements that must be present,
1990 /// undemanded elements in Sequence may be null (SDValue()). If passed a
1991 /// non-null UndefElements bitvector, it will resize it to match the original
1992 /// vector width and set the bits where elements are undef. If result is
1993 /// false, Sequence will be empty.
1994 bool getRepeatedSequence(const APInt &DemandedElts,
1995 SmallVectorImpl<SDValue> &Sequence,
1996 BitVector *UndefElements = nullptr) const;
1997
1998 /// Find the shortest repeating sequence of values in the build vector.
1999 ///
2000 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
2001 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
2002 ///
2003 /// Currently this must be a power-of-2 build vector.
2004 /// If passed a non-null UndefElements bitvector, it will resize it to match
2005 /// the original vector width and set the bits where elements are undef.
2006 /// If result is false, Sequence will be empty.
2007 bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
2008 BitVector *UndefElements = nullptr) const;
2009
2010 /// Returns the demanded splatted constant or null if this is not a constant
2011 /// splat.
2012 ///
2013 /// The DemandedElts mask indicates the elements that must be in the splat.
2014 /// If passed a non-null UndefElements bitvector, it will resize it to match
2015 /// the vector width and set the bits where elements are undef.
2016 ConstantSDNode *
2017 getConstantSplatNode(const APInt &DemandedElts,
2018 BitVector *UndefElements = nullptr) const;
2019
2020 /// Returns the splatted constant or null if this is not a constant
2021 /// splat.
2022 ///
2023 /// If passed a non-null UndefElements bitvector, it will resize it to match
2024 /// the vector width and set the bits where elements are undef.
2025 ConstantSDNode *
2026 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
2027
2028 /// Returns the demanded splatted constant FP or null if this is not a
2029 /// constant FP splat.
2030 ///
2031 /// The DemandedElts mask indicates the elements that must be in the splat.
2032 /// If passed a non-null UndefElements bitvector, it will resize it to match
2033 /// the vector width and set the bits where elements are undef.
2034 ConstantFPSDNode *
2035 getConstantFPSplatNode(const APInt &DemandedElts,
2036 BitVector *UndefElements = nullptr) const;
2037
2038 /// Returns the splatted constant FP or null if this is not a constant
2039 /// FP splat.
2040 ///
2041 /// If passed a non-null UndefElements bitvector, it will resize it to match
2042 /// the vector width and set the bits where elements are undef.
2043 ConstantFPSDNode *
2044 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
2045
2046 /// If this is a constant FP splat and the splatted constant FP is an
2047 /// exact power or 2, return the log base 2 integer value. Otherwise,
2048 /// return -1.
2049 ///
2050 /// The BitWidth specifies the necessary bit precision.
2051 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
2052 uint32_t BitWidth) const;
2053
2054 /// Extract the raw bit data from a build vector of Undef, Constant or
2055 /// ConstantFP node elements. Each raw bit element will be \p
2056 /// DstEltSizeInBits wide, undef elements are treated as zero, and entirely
2057 /// undefined elements are flagged in \p UndefElements.
2058 bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2059 SmallVectorImpl<APInt> &RawBitElements,
2060 BitVector &UndefElements) const;
2061
2062 bool isConstant() const;
2063
2064 /// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
2065 /// Undef elements are treated as zero, and entirely undefined elements are
2066 /// flagged in \p DstUndefElements.
2067 static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2068 SmallVectorImpl<APInt> &DstBitElements,
2069 ArrayRef<APInt> SrcBitElements,
2070 BitVector &DstUndefElements,
2071 const BitVector &SrcUndefElements);
2072
2073 static bool classof(const SDNode *N) {
2074 return N->getOpcode() == ISD::BUILD_VECTOR;
2075 }
2076};
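A minimal sketch of the splat queries above; the optional BitVector argument is omitted here and the helper name is invented:

// If BV is a constant integer splat, return the splatted value via SplatVal.
static bool getSplatConstant(const BuildVectorSDNode *BV, APInt &SplatVal) {
  if (ConstantSDNode *C = BV->getConstantSplatNode()) {
    SplatVal = C->getAPIntValue(); // value shared by every defined lane
    return true;
  }
  return false;
}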
2077
2078/// An SDNode that holds an arbitrary LLVM IR Value. This is
2079/// used when the SelectionDAG needs to make a simple reference to something
2080/// in the LLVM IR representation.
2081///
2082class SrcValueSDNode : public SDNode {
2083 friend class SelectionDAG;
2084
2085 const Value *V;
2086
2087 /// Create a SrcValue for a general value.
2088 explicit SrcValueSDNode(const Value *v)
2089 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2090
2091public:
2092 /// Return the contained Value.
2093 const Value *getValue() const { return V; }
2094
2095 static bool classof(const SDNode *N) {
2096 return N->getOpcode() == ISD::SRCVALUE;
2097 }
2098};
2099
2100class MDNodeSDNode : public SDNode {
2101 friend class SelectionDAG;
2102
2103 const MDNode *MD;
2104
2105 explicit MDNodeSDNode(const MDNode *md)
2106 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2107 {}
2108
2109public:
2110 const MDNode *getMD() const { return MD; }
2111
2112 static bool classof(const SDNode *N) {
2113 return N->getOpcode() == ISD::MDNODE_SDNODE;
2114 }
2115};
2116
2117class RegisterSDNode : public SDNode {
2118 friend class SelectionDAG;
2119
2120 Register Reg;
2121
2122 RegisterSDNode(Register reg, EVT VT)
2123 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2124
2125public:
2126 Register getReg() const { return Reg; }
2127
2128 static bool classof(const SDNode *N) {
2129 return N->getOpcode() == ISD::Register;
2130 }
2131};
2132
2133class RegisterMaskSDNode : public SDNode {
2134 friend class SelectionDAG;
2135
2136 // The memory for RegMask is not owned by the node.
2137 const uint32_t *RegMask;
2138
2139 RegisterMaskSDNode(const uint32_t *mask)
2140 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2141 RegMask(mask) {}
2142
2143public:
2144 const uint32_t *getRegMask() const { return RegMask; }
2145
2146 static bool classof(const SDNode *N) {
2147 return N->getOpcode() == ISD::RegisterMask;
2148 }
2149};
2150
2151class BlockAddressSDNode : public SDNode {
2152 friend class SelectionDAG;
2153
2154 const BlockAddress *BA;
2155 int64_t Offset;
2156 unsigned TargetFlags;
2157
2158 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2159 int64_t o, unsigned Flags)
2160 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2161 BA(ba), Offset(o), TargetFlags(Flags) {}
2162
2163public:
2164 const BlockAddress *getBlockAddress() const { return BA; }
2165 int64_t getOffset() const { return Offset; }
2166 unsigned getTargetFlags() const { return TargetFlags; }
2167
2168 static bool classof(const SDNode *N) {
2169 return N->getOpcode() == ISD::BlockAddress ||
2170 N->getOpcode() == ISD::TargetBlockAddress;
2171 }
2172};
2173
2174class LabelSDNode : public SDNode {
2175 friend class SelectionDAG;
2176
2177 MCSymbol *Label;
2178
2179 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2180 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2181    assert(LabelSDNode::classof(this) && "not a label opcode");
2182 }
2183
2184public:
2185 MCSymbol *getLabel() const { return Label; }
2186
2187 static bool classof(const SDNode *N) {
2188 return N->getOpcode() == ISD::EH_LABEL ||
2189 N->getOpcode() == ISD::ANNOTATION_LABEL;
2190 }
2191};
2192
2193class ExternalSymbolSDNode : public SDNode {
2194 friend class SelectionDAG;
2195
2196 const char *Symbol;
2197 unsigned TargetFlags;
2198
2199 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2200 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2201 DebugLoc(), getSDVTList(VT)),
2202 Symbol(Sym), TargetFlags(TF) {}
2203
2204public:
2205 const char *getSymbol() const { return Symbol; }
2206 unsigned getTargetFlags() const { return TargetFlags; }
2207
2208 static bool classof(const SDNode *N) {
2209 return N->getOpcode() == ISD::ExternalSymbol ||
2210 N->getOpcode() == ISD::TargetExternalSymbol;
2211 }
2212};
2213
2214class MCSymbolSDNode : public SDNode {
2215 friend class SelectionDAG;
2216
2217 MCSymbol *Symbol;
2218
2219 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2220 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2221
2222public:
2223 MCSymbol *getMCSymbol() const { return Symbol; }
2224
2225 static bool classof(const SDNode *N) {
2226 return N->getOpcode() == ISD::MCSymbol;
2227 }
2228};
2229
2230class CondCodeSDNode : public SDNode {
2231 friend class SelectionDAG;
2232
2233 ISD::CondCode Condition;
2234
2235 explicit CondCodeSDNode(ISD::CondCode Cond)
2236 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2237 Condition(Cond) {}
2238
2239public:
2240 ISD::CondCode get() const { return Condition; }
2241
2242 static bool classof(const SDNode *N) {
2243 return N->getOpcode() == ISD::CONDCODE;
2244 }
2245};
2246
2247/// This class is used to represent EVT's, which are used
2248/// to parameterize some operations.
2249class VTSDNode : public SDNode {
2250 friend class SelectionDAG;
2251
2252 EVT ValueType;
2253
2254 explicit VTSDNode(EVT VT)
2255 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2256 ValueType(VT) {}
2257
2258public:
2259 EVT getVT() const { return ValueType; }
2260
2261 static bool classof(const SDNode *N) {
2262 return N->getOpcode() == ISD::VALUETYPE;
2263 }
2264};
2265
2266/// Base class for LoadSDNode and StoreSDNode
2267class LSBaseSDNode : public MemSDNode {
2268public:
2269 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2270 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2271 MachineMemOperand *MMO)
2272 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2273 LSBaseSDNodeBits.AddressingMode = AM;
2274    assert(getAddressingMode() == AM && "Value truncated");
2275 }
2276
2277 const SDValue &getOffset() const {
2278 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2279 }
2280
2281 /// Return the addressing mode for this load or store:
2282 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2283 ISD::MemIndexedMode getAddressingMode() const {
2284 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2285 }
2286
2287 /// Return true if this is a pre/post inc/dec load/store.
2288 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2289
2290 /// Return true if this is NOT a pre/post inc/dec load/store.
2291 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2292
2293 static bool classof(const SDNode *N) {
2294 return N->getOpcode() == ISD::LOAD ||
2295 N->getOpcode() == ISD::STORE;
2296 }
2297};
2298
2299/// This class is used to represent ISD::LOAD nodes.
2300class LoadSDNode : public LSBaseSDNode {
2301 friend class SelectionDAG;
2302
2303 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2304 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2305 MachineMemOperand *MMO)
2306 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2307 LoadSDNodeBits.ExtTy = ETy;
2308    assert(readMem() && "Load MachineMemOperand is not a load!");
2309    assert(!writeMem() && "Load MachineMemOperand is a store!");
2310 }
2311
2312public:
2313 /// Return whether this is a plain node,
2314 /// or one of the varieties of value-extending loads.
2315 ISD::LoadExtType getExtensionType() const {
2316 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2317 }
2318
2319 const SDValue &getBasePtr() const { return getOperand(1); }
2320 const SDValue &getOffset() const { return getOperand(2); }
2321
2322 static bool classof(const SDNode *N) {
2323 return N->getOpcode() == ISD::LOAD;
2324 }
2325};
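As an illustration of the load accessors (a sketch; ISD::NON_EXTLOAD is the "no extension" member of ISD::LoadExtType):

// A plain load: unindexed addressing and no value extension to undo, which
// is the easiest shape for most combines to reason about.
static bool isPlainLoad(const LoadSDNode *Ld) {
  return Ld->isUnindexed() &&
         Ld->getExtensionType() == ISD::NON_EXTLOAD;
}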
2326
2327/// This class is used to represent ISD::STORE nodes.
2328class StoreSDNode : public LSBaseSDNode {
2329 friend class SelectionDAG;
2330
2331 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2332 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2333 MachineMemOperand *MMO)
2334 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2335 StoreSDNodeBits.IsTruncating = isTrunc;
2336    assert(!readMem() && "Store MachineMemOperand is a load!");
2337    assert(writeMem() && "Store MachineMemOperand is not a store!");
2338 }
2339
2340public:
2341 /// Return true if the op does a truncation before store.
2342 /// For integers this is the same as doing a TRUNCATE and storing the result.
2343 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2344 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2345 void setTruncatingStore(bool Truncating) {
2346 StoreSDNodeBits.IsTruncating = Truncating;
2347 }
2348
2349 const SDValue &getValue() const { return getOperand(1); }
2350 const SDValue &getBasePtr() const { return getOperand(2); }
2351 const SDValue &getOffset() const { return getOperand(3); }
2352
2353 static bool classof(const SDNode *N) {
2354 return N->getOpcode() == ISD::STORE;
2355 }
2356};
2357
2358/// This base class is used to represent VP_LOAD and VP_STORE nodes
2359class VPLoadStoreSDNode : public MemSDNode {
2360public:
2361 friend class SelectionDAG;
2362
2363 VPLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2364 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2365 MachineMemOperand *MMO)
2366 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2367 LSBaseSDNodeBits.AddressingMode = AM;
2368    assert(getAddressingMode() == AM && "Value truncated");
2369 }
2370
2371 // VPLoadSDNode (Chain, Ptr, Offset, Mask, EVL)
2372 // VPStoreSDNode (Chain, Data, Ptr, Offset, Mask, EVL)
2373 // Mask is a vector of i1 elements;
2374 // the type of EVL is TLI.getVPExplicitVectorLengthTy().
2375 const SDValue &getOffset() const {
2376 return getOperand(getOpcode() == ISD::VP_LOAD ? 2 : 3);
2377 }
2378 const SDValue &getBasePtr() const {
2379 return getOperand(getOpcode() == ISD::VP_LOAD ? 1 : 2);
2380 }
2381 const SDValue &getMask() const {
2382 return getOperand(getOpcode() == ISD::VP_LOAD ? 3 : 4);
2383 }
2384 const SDValue &getVectorLength() const {
2385 return getOperand(getOpcode() == ISD::VP_LOAD ? 4 : 5);
2386 }
2387
2388 /// Return the addressing mode for this load or store:
2389 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2390 ISD::MemIndexedMode getAddressingMode() const {
2391 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2392 }
2393
2394 /// Return true if this is a pre/post inc/dec load/store.
2395 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2396
2397 /// Return true if this is NOT a pre/post inc/dec load/store.
2398 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2399
2400 static bool classof(const SDNode *N) {
2401 return N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
2402 }
2403};
2404
2405/// This class is used to represent a VP_LOAD node
2406class VPLoadSDNode : public VPLoadStoreSDNode {
2407public:
2408 friend class SelectionDAG;
2409
2410 VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2411 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding,
2412 EVT MemVT, MachineMemOperand *MMO)
2413 : VPLoadStoreSDNode(ISD::VP_LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2414 LoadSDNodeBits.ExtTy = ETy;
2415 LoadSDNodeBits.IsExpanding = isExpanding;
2416 }
2417
2418 ISD::LoadExtType getExtensionType() const {
2419 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2420 }
2421
2422 const SDValue &getBasePtr() const { return getOperand(1); }
2423 const SDValue &getOffset() const { return getOperand(2); }
2424 const SDValue &getMask() const { return getOperand(3); }
2425 const SDValue &getVectorLength() const { return getOperand(4); }
2426
2427 static bool classof(const SDNode *N) {
2428 return N->getOpcode() == ISD::VP_LOAD;
2429 }
2430 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2431};
2432
2433/// This class is used to represent a VP_STORE node
2434class VPStoreSDNode : public VPLoadStoreSDNode {
2435public:
2436 friend class SelectionDAG;
2437
2438 VPStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2439 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2440 EVT MemVT, MachineMemOperand *MMO)
2441 : VPLoadStoreSDNode(ISD::VP_STORE, Order, dl, VTs, AM, MemVT, MMO) {
2442 StoreSDNodeBits.IsTruncating = isTrunc;
2443 StoreSDNodeBits.IsCompressing = isCompressing;
2444 }
2445
2446 /// Return true if this is a truncating store.
2447 /// For integers this is the same as doing a TRUNCATE and storing the result.
2448 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2449 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2450
2451 /// Returns true if the op does a compression to the vector before storing.
2452 /// The node contiguously stores the active elements (integers or floats)
2453 /// in src (those with their respective bit set in writemask k) to unaligned
2454 /// memory at base_addr.
2455 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2456
2457 const SDValue &getValue() const { return getOperand(1); }
2458 const SDValue &getBasePtr() const { return getOperand(2); }
2459 const SDValue &getOffset() const { return getOperand(3); }
2460 const SDValue &getMask() const { return getOperand(4); }
2461 const SDValue &getVectorLength() const { return getOperand(5); }
2462
2463 static bool classof(const SDNode *N) {
2464 return N->getOpcode() == ISD::VP_STORE;
2465 }
2466};
2467
2468/// This base class is used to represent MLOAD and MSTORE nodes
2469class MaskedLoadStoreSDNode : public MemSDNode {
2470public:
2471 friend class SelectionDAG;
2472
2473 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2474 const DebugLoc &dl, SDVTList VTs,
2475 ISD::MemIndexedMode AM, EVT MemVT,
2476 MachineMemOperand *MMO)
2477 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2478 LSBaseSDNodeBits.AddressingMode = AM;
2479 assert(getAddressingMode() == AM && "Value truncated");
2480 }
2481
2482 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2483 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2484 // Mask is a vector of i1 elements
2485 const SDValue &getOffset() const {
2486 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2487 }
2488 const SDValue &getMask() const {
2489 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2490 }
2491
2492 /// Return the addressing mode for this load or store:
2493 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2494 ISD::MemIndexedMode getAddressingMode() const {
2495 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2496 }
2497
2498 /// Return true if this is a pre/post inc/dec load/store.
2499 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2500
2501 /// Return true if this is NOT a pre/post inc/dec load/store.
2502 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2503
2504 static bool classof(const SDNode *N) {
2505 return N->getOpcode() == ISD::MLOAD ||
2506 N->getOpcode() == ISD::MSTORE;
2507 }
2508};
2509
2510/// This class is used to represent an MLOAD node
2511class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2512public:
2513 friend class SelectionDAG;
2514
2515 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2516 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2517 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2518 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2519 LoadSDNodeBits.ExtTy = ETy;
2520 LoadSDNodeBits.IsExpanding = IsExpanding;
2521 }
2522
2523 ISD::LoadExtType getExtensionType() const {
2524 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2525 }
2526
2527 const SDValue &getBasePtr() const { return getOperand(1); }
2528 const SDValue &getOffset() const { return getOperand(2); }
2529 const SDValue &getMask() const { return getOperand(3); }
2530 const SDValue &getPassThru() const { return getOperand(4); }
2531
2532 static bool classof(const SDNode *N) {
2533 return N->getOpcode() == ISD::MLOAD;
2534 }
2535
2536 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2537};
2538
2539/// This class is used to represent an MSTORE node
2540class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2541public:
2542 friend class SelectionDAG;
2543
2544 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2545 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2546 EVT MemVT, MachineMemOperand *MMO)
2547 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2548 StoreSDNodeBits.IsTruncating = isTrunc;
2549 StoreSDNodeBits.IsCompressing = isCompressing;
2550 }
2551
2552 /// Return true if the op does a truncation before store.
2553 /// For integers this is the same as doing a TRUNCATE and storing the result.
2554 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2555 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2556
2557 /// Returns true if the op compresses the vector before storing.
2558 /// The node contiguously stores the active elements (integers or floats)
2559 /// in src (those with their respective bit set in writemask k) to unaligned
2560 /// memory at base_addr.
2561 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2562
2563 const SDValue &getValue() const { return getOperand(1); }
2564 const SDValue &getBasePtr() const { return getOperand(2); }
2565 const SDValue &getOffset() const { return getOperand(3); }
2566 const SDValue &getMask() const { return getOperand(4); }
2567
2568 static bool classof(const SDNode *N) {
2569 return N->getOpcode() == ISD::MSTORE;
2570 }
2571};
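
Like ISD::isNormalStore further below for plain stores, code will often want only the simplest masked stores. A hedged sketch, not part of the header; isSimpleMaskedStore is an invented name:

// Hypothetical helper: an MSTORE that neither truncates nor compresses and
// uses no pre/post-increment addressing.
static bool isSimpleMaskedStore(const SDNode *N) {
  const auto *St = dyn_cast<MaskedStoreSDNode>(N);
  return St && !St->isTruncatingStore() && !St->isCompressingStore() &&
         St->isUnindexed();
}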
2572
2573/// This is a base class used to represent
2574/// VP_GATHER and VP_SCATTER nodes
2575///
2576class VPGatherScatterSDNode : public MemSDNode {
2577public:
2578 friend class SelectionDAG;
2579
2580 VPGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2581 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2582 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2583 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2584 LSBaseSDNodeBits.AddressingMode = IndexType;
2585 assert(getIndexType() == IndexType && "Value truncated");
2586 }
2587
2588 /// How is Index applied to BasePtr when computing addresses.
2589 ISD::MemIndexType getIndexType() const {
2590 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2591 }
2592 bool isIndexScaled() const {
2593 return (getIndexType() == ISD::SIGNED_SCALED) ||
2594 (getIndexType() == ISD::UNSIGNED_SCALED);
2595 }
2596 bool isIndexSigned() const {
2597 return (getIndexType() == ISD::SIGNED_SCALED) ||
2598 (getIndexType() == ISD::SIGNED_UNSCALED);
2599 }
2600
2601 // Both nodes share the operand order below; the scatter's value operand shifts it by one:
2602 // VPGatherSDNode (Chain, base, index, scale, mask, vlen)
2603 // VPScatterSDNode (Chain, value, base, index, scale, mask, vlen)
2604 // Mask is a vector of i1 elements
2605 const SDValue &getBasePtr() const {
2606 return getOperand((getOpcode() == ISD::VP_GATHER) ? 1 : 2);
2607 }
2608 const SDValue &getIndex() const {
2609 return getOperand((getOpcode() == ISD::VP_GATHER) ? 2 : 3);
2610 }
2611 const SDValue &getScale() const {
2612 return getOperand((getOpcode() == ISD::VP_GATHER) ? 3 : 4);
2613 }
2614 const SDValue &getMask() const {
2615 return getOperand((getOpcode() == ISD::VP_GATHER) ? 4 : 5);
2616 }
2617 const SDValue &getVectorLength() const {
2618 return getOperand((getOpcode() == ISD::VP_GATHER) ? 5 : 6);
2619 }
2620
2621 static bool classof(const SDNode *N) {
2622 return N->getOpcode() == ISD::VP_GATHER ||
2623 N->getOpcode() == ISD::VP_SCATTER;
2624 }
2625};
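
Because the scatter's value operand shifts every later operand by one, callers normally go through the opcode-aware accessors rather than hard-coded indices. A small sketch under that assumption; the struct and function names are made up:

// Hypothetical bundle of the operands shared by VP_GATHER and VP_SCATTER,
// filled via the accessors that compensate for the operand shift.
struct VPGatherScatterOperands {
  SDValue Base, Index, Scale, Mask, VectorLength;
};

static VPGatherScatterOperands collectOperands(const VPGatherScatterSDNode *N) {
  return {N->getBasePtr(), N->getIndex(), N->getScale(), N->getMask(),
          N->getVectorLength()};
}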
2626
2627/// This class is used to represent a VP_GATHER node
2628///
2629class VPGatherSDNode : public VPGatherScatterSDNode {
2630public:
2631 friend class SelectionDAG;
2632
2633 VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2634 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2635 : VPGatherScatterSDNode(ISD::VP_GATHER, Order, dl, VTs, MemVT, MMO,
2636 IndexType) {}
2637
2638 static bool classof(const SDNode *N) {
2639 return N->getOpcode() == ISD::VP_GATHER;
2640 }
2641};
2642
2643/// This class is used to represent a VP_SCATTER node
2644///
2645class VPScatterSDNode : public VPGatherScatterSDNode {
2646public:
2647 friend class SelectionDAG;
2648
2649 VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2650 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2651 : VPGatherScatterSDNode(ISD::VP_SCATTER, Order, dl, VTs, MemVT, MMO,
2652 IndexType) {}
2653
2654 const SDValue &getValue() const { return getOperand(1); }
2655
2656 static bool classof(const SDNode *N) {
2657 return N->getOpcode() == ISD::VP_SCATTER;
2658 }
2659};
2660
2661/// This is a base class used to represent
2662/// MGATHER and MSCATTER nodes
2663///
2664class MaskedGatherScatterSDNode : public MemSDNode {
2665public:
2666 friend class SelectionDAG;
2667
2668 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2669 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2670 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2671 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2672 LSBaseSDNodeBits.AddressingMode = IndexType;
2673 assert(getIndexType() == IndexType && "Value truncated");
2674 }
2675
2676 /// How is Index applied to BasePtr when computing addresses.
2677 ISD::MemIndexType getIndexType() const {
2678 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2679 }
2680 void setIndexType(ISD::MemIndexType IndexType) {
2681 LSBaseSDNodeBits.AddressingMode = IndexType;
2682 }
2683 bool isIndexScaled() const {
2684 return (getIndexType() == ISD::SIGNED_SCALED) ||
2685 (getIndexType() == ISD::UNSIGNED_SCALED);
2686 }
2687 bool isIndexSigned() const {
2688 return (getIndexType() == ISD::SIGNED_SCALED) ||
2689 (getIndexType() == ISD::SIGNED_UNSCALED);
2690 }
2691
2692 // In both nodes the mask is Op2 and the base address is Op3:
2693 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2694 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2695 // Mask is a vector of i1 elements
2696 const SDValue &getBasePtr() const { return getOperand(3); }
2697 const SDValue &getIndex() const { return getOperand(4); }
2698 const SDValue &getMask() const { return getOperand(2); }
2699 const SDValue &getScale() const { return getOperand(5); }
2700
2701 static bool classof(const SDNode *N) {
2702 return N->getOpcode() == ISD::MGATHER ||
2703 N->getOpcode() == ISD::MSCATTER;
2704 }
2705};
2706
2707/// This class is used to represent an MGATHER node
2708///
2709class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2710public:
2711 friend class SelectionDAG;
2712
2713 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2714 EVT MemVT, MachineMemOperand *MMO,
2715 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2716 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2717 IndexType) {
2718 LoadSDNodeBits.ExtTy = ETy;
2719 }
2720
2721 const SDValue &getPassThru() const { return getOperand(1); }
2722
2723 ISD::LoadExtType getExtensionType() const {
2724 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2725 }
2726
2727 static bool classof(const SDNode *N) {
2728 return N->getOpcode() == ISD::MGATHER;
2729 }
2730};
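
An MGATHER whose passthru is undef and whose extension type is NON_EXTLOAD behaves like a plain gather, a shape DAG combines commonly test for. A sketch only; isPlainGather is a hypothetical name:

// Hypothetical helper: non-extending gather whose masked-off lanes carry no
// meaningful passthru value.
static bool isPlainGather(const MaskedGatherSDNode *N) {
  return N->getExtensionType() == ISD::NON_EXTLOAD &&
         N->getPassThru().isUndef();
}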
2731
2732/// This class is used to represent an MSCATTER node
2733///
2734class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2735public:
2736 friend class SelectionDAG;
2737
2738 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2739 EVT MemVT, MachineMemOperand *MMO,
2740 ISD::MemIndexType IndexType, bool IsTrunc)
2741 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2742 IndexType) {
2743 StoreSDNodeBits.IsTruncating = IsTrunc;
2744 }
2745
2746 /// Return true if the op does a truncation before store.
2747 /// For integers this is the same as doing a TRUNCATE and storing the result.
2748 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2749 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2750
2751 const SDValue &getValue() const { return getOperand(1); }
2752
2753 static bool classof(const SDNode *N) {
2754 return N->getOpcode() == ISD::MSCATTER;
2755 }
2756};
2757
2758/// An SDNode that represents everything that will be needed
2759/// to construct a MachineInstr. These nodes are created during the
2760/// instruction selection proper phase.
2761///
2762/// Note that the only supported way to set the `memoperands` is by calling the
2763/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2764/// inside the DAG rather than in the node.
2765class MachineSDNode : public SDNode {
2766private:
2767 friend class SelectionDAG;
2768
2769 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2770 : SDNode(Opc, Order, DL, VTs) {}
2771
2772 // We use a pointer union between a single `MachineMemOperand` pointer and
2773 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2774 // the number of these is zero, the single pointer variant is used when the
2775 // number is one, and the array is used for larger numbers.
2776 //
2777 // The array is allocated via the `SelectionDAG`'s allocator and so will
2778 // always live until the DAG is cleaned up and doesn't require ownership here.
2779 //
2780 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2781 // subclasses aren't managed in a conforming C++ manner. See the comments on
2782 // `SelectionDAG::MorphNodeTo` for the full story, but the
2783 // constraint here is that these don't manage memory with their constructor or
2784 // destructor and can be initialized to a good state even if they start off
2785 // uninitialized.
2786 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2787
2788 // Note that this could be folded into the above `MemRefs` member if doing so
2789 // is advantageous at some point. We don't need to store this in most cases.
2790 // However, at the moment this doesn't appear to make the allocation any
2791 // smaller and makes the code somewhat simpler to read.
2792 int NumMemRefs = 0;
2793
2794public:
2795 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2796
2797 ArrayRef<MachineMemOperand *> memoperands() const {
2798 // Special case the common cases.
2799 if (NumMemRefs == 0)
2800 return {};
2801 if (NumMemRefs == 1)
2802 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2803
2804 // Otherwise we have an actual array.
2805 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2806 }
2807 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2808 mmo_iterator memoperands_end() const { return memoperands().end(); }
2809 bool memoperands_empty() const { return memoperands().empty(); }
2810
2811 /// Clear out the memory reference descriptor list.
2812 void clearMemRefs() {
2813 MemRefs = nullptr;
2814 NumMemRefs = 0;
2815 }
2816
2817 static bool classof(const SDNode *N) {
2818 return N->isMachineOpcode();
2819 }
2820};
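
memoperands() returns an ArrayRef regardless of which PointerUnion state is active, so clients iterate it uniformly. A sketch, not part of the header, assuming MachineMemOperand::getSize() returns a byte count as it does in this LLVM version; the helper name is invented:

// Hypothetical helper: total bytes referenced by a MachineSDNode's memory
// operands. The list itself can only be set via SelectionDAG::setNodeMemRefs.
static uint64_t totalMemRefBytes(const MachineSDNode *N) {
  uint64_t Bytes = 0;
  for (const MachineMemOperand *MMO : N->memoperands())
    Bytes += MMO->getSize();
  return Bytes;
}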
2821
2822/// An SDNode that records that a register contains a value guaranteed to have
2823/// at least the recorded alignment.
2824class AssertAlignSDNode : public SDNode {
2825 Align Alignment;
2826
2827public:
2828 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2829 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2830
2831 Align getAlign() const { return Alignment; }
2832
2833 static bool classof(const SDNode *N) {
2834 return N->getOpcode() == ISD::AssertAlign;
2835 }
2836};
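
Code that needs an alignment guarantee can peek through an AssertAlign node. A hedged sketch; knownAlignOf is an invented name and Align(1) stands in for "no guarantee":

// Hypothetical helper: the alignment recorded by an AssertAlign node, or the
// trivial 1-byte alignment when the value carries no such assertion.
static Align knownAlignOf(SDValue V) {
  if (const auto *AA = dyn_cast<AssertAlignSDNode>(V.getNode()))
    return AA->getAlign();
  return Align(1);
}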
2837
2838class SDNodeIterator {
2839 const SDNode *Node;
2840 unsigned Operand;
2841
2842 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2843
2844public:
2845 using iterator_category = std::forward_iterator_tag;
2846 using value_type = SDNode;
2847 using difference_type = std::ptrdiff_t;
2848 using pointer = value_type *;
2849 using reference = value_type &;
2850
2851 bool operator==(const SDNodeIterator& x) const {
2852 return Operand == x.Operand;
2853 }
2854 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2855
2856 pointer operator*() const {
2857 return Node->getOperand(Operand).getNode();
2858 }
2859 pointer operator->() const { return operator*(); }
2860
2861 SDNodeIterator& operator++() { // Preincrement
2862 ++Operand;
2863 return *this;
2864 }
2865 SDNodeIterator operator++(int) { // Postincrement
2866 SDNodeIterator tmp = *this; ++*this; return tmp;
2867 }
2868 size_t operator-(SDNodeIterator Other) const {
2869 assert(Node == Other.Node &&
2870 "Cannot compare iterators of two different nodes!");
2871 return Operand - Other.Operand;
2872 }
2873
2874 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2875 static SDNodeIterator end (const SDNode *N) {
2876 return SDNodeIterator(N, N->getNumOperands());
2877 }
2878
2879 unsigned getOperand() const { return Operand; }
2880 const SDNode *getNode() const { return Node; }
2881};
2882
2883template <> struct GraphTraits<SDNode*> {
2884 using NodeRef = SDNode *;
2885 using ChildIteratorType = SDNodeIterator;
2886
2887 static NodeRef getEntryNode(SDNode *N) { return N; }
2888
2889 static ChildIteratorType child_begin(NodeRef N) {
2890 return SDNodeIterator::begin(N);
2891 }
2892
2893 static ChildIteratorType child_end(NodeRef N) {
2894 return SDNodeIterator::end(N);
2895 }
2896};
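
With GraphTraits<SDNode*> defined, the generic graph utilities in llvm/ADT (such as depth_first from DepthFirstIterator.h) can walk the operand graph. A sketch only, assuming that include; countReachableNodes is an invented name:

#include "llvm/ADT/DepthFirstIterator.h"

// Hypothetical helper: count the nodes reachable from Root by following
// operand edges, driven by the GraphTraits specialization above.
static unsigned countReachableNodes(SDNode *Root) {
  unsigned Count = 0;
  for (SDNode *N : depth_first(Root)) {
    (void)N; // Only the visit itself matters here.
    ++Count;
  }
  return Count;
}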
2897
2898/// A representation of the largest SDNode, for use in sizeof().
2899///
2900/// This needs to be a union because the largest node differs between 32-bit
2901/// systems with 4-byte and 8-byte pointer alignment.
2902using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2903 BlockAddressSDNode,
2904 GlobalAddressSDNode,
2905 PseudoProbeSDNode>;
2906
2907/// The SDNode class with the greatest alignment requirement.
2908using MostAlignedSDNode = GlobalAddressSDNode;
2909
2910namespace ISD {
2911
2912 /// Returns true if the specified node is a non-extending and unindexed load.
2913 inline bool isNormalLoad(const SDNode *N) {
2914 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2915 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2916 Ld->getAddressingMode() == ISD::UNINDEXED;
2917 }
2918
2919 /// Returns true if the specified node is a non-extending load.
2920 inline bool isNON_EXTLoad(const SDNode *N) {
2921 return isa<LoadSDNode>(N) &&
2922 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2923 }
2924
2925 /// Returns true if the specified node is an EXTLOAD.
2926 inline bool isEXTLoad(const SDNode *N) {
2927 return isa<LoadSDNode>(N) &&
2928 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2929 }
2930
2931 /// Returns true if the specified node is a SEXTLOAD.
2932 inline bool isSEXTLoad(const SDNode *N) {
2933 return isa<LoadSDNode>(N) &&
2934 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2935 }
2936
2937 /// Returns true if the specified node is a ZEXTLOAD.
2938 inline bool isZEXTLoad(const SDNode *N) {
2939 return isa<LoadSDNode>(N) &&
2940 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2941 }
2942
2943 /// Returns true if the specified node is an unindexed load.
2944 inline bool isUNINDEXEDLoad(const SDNode *N) {
2945 return isa<LoadSDNode>(N) &&
2946 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2947 }
2948
2949 /// Returns true if the specified node is a non-truncating
2950 /// and unindexed store.
2951 inline bool isNormalStore(const SDNode *N) {
2952 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2953 return St && !St->isTruncatingStore() &&
2954 St->getAddressingMode() == ISD::UNINDEXED;
2955 }
2956
2957 /// Returns true if the specified node is an unindexed store.
2958 inline bool isUNINDEXEDStore(const SDNode *N) {
2959 return isa<StoreSDNode>(N) &&
2960 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2961 }
2962
2963 /// Attempt to match a unary predicate against a scalar/splat constant or
2964 /// every element of a constant BUILD_VECTOR.
2965 /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2966 bool matchUnaryPredicate(SDValue Op,
2967 std::function<bool(ConstantSDNode *)> Match,
2968 bool AllowUndefs = false);
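
matchUnaryPredicate lets a caller test a scalar constant, a splat, or every element of a constant BUILD_VECTOR with a single lambda. A sketch of a plausible use; the wrapper name is invented:

// Hypothetical helper: true if Op is a constant (or constant vector) whose
// elements are all powers of two.
static bool isConstantPowerOf2(SDValue Op) {
  return ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
    return C && C->getAPIntValue().isPowerOf2();
  });
}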
2969
2970 /// Attempt to match a binary predicate against a pair of scalar/splat
2971 /// constants or every element of a pair of constant BUILD_VECTORs.
2972 /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2973 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2974 bool matchBinaryPredicate(
2975 SDValue LHS, SDValue RHS,
2976 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2977 bool AllowUndefs = false, bool AllowTypeMismatch = false);
2978
2979 /// Returns true if the specified value is the overflow result from one
2980 /// of the overflow intrinsic nodes.
2981 inline bool isOverflowIntrOpRes(SDValue Op) {
2982 unsigned Opc = Op.getOpcode();
2983 return (Op.getResNo() == 1 &&
2984 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2985 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2986 }
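
isOverflowIntrOpRes is typically paired with a single-use check when deciding whether to fold the overflow flag into a conditional branch or select. A hedged sketch; the helper name is made up:

// Hypothetical helper: the overflow result of a [SU]ADDO/[SU]SUBO/[SU]MULO
// node with exactly one user, a common precondition for such folds.
static bool isSoleUseOverflowFlag(SDValue Cond) {
  return ISD::isOverflowIntrOpRes(Cond) && Cond.getNode()->hasOneUse();
}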
2987
2988} // end namespace ISD
2989
2990} // end namespace llvm
2991
2992#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H